The following changes since commit aa3a285b5bc56a4208b3b57d4a55291e9c260107:

  Merge tag 'mem-2024-12-21' of https://github.com/davidhildenbrand/qemu into staging (2024-12-22 14:33:27 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241224

for you to fetch changes up to e4a8e093dc74be049f4829831dce76e5edab0003:

  accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core (2024-12-24 08:32:15 -0800)

----------------------------------------------------------------
tcg/optimize: Remove in-flight mask data from OptContext
fpu: Add float*_muladd_scalbn
fpu: Remove float_muladd_halve_result
fpu: Add float_round_nearest_even_max
fpu: Add float_muladd_suppress_add_product_zero
target/hexagon: Use float32_muladd
accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

----------------------------------------------------------------
Ilya Leoshkevich (1):
      tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

Pierrick Bouvier (1):
      plugins: optimize cpu_index code generation

Richard Henderson (70):
      tcg/optimize: Split out finish_bb, finish_ebb
      tcg/optimize: Split out fold_affected_mask
      tcg/optimize: Copy mask writeback to fold_masks
      tcg/optimize: Split out fold_masks_zs
      tcg/optimize: Augment s_mask from z_mask in fold_masks_zs
      tcg/optimize: Change representation of s_mask
      tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2
      tcg/optimize: Introduce const value accessors for TempOptInfo
      tcg/optimize: Use fold_masks_zs in fold_and
      tcg/optimize: Use fold_masks_zs in fold_andc
      tcg/optimize: Use fold_masks_zs in fold_bswap
      tcg/optimize: Use fold_masks_zs in fold_count_zeros
      tcg/optimize: Use fold_masks_z in fold_ctpop
      tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
      tcg/optimize: Compute sign mask in fold_deposit
      tcg/optimize: Use finish_folding in fold_divide
      tcg/optimize: Use finish_folding in fold_dup, fold_dup2
      tcg/optimize: Use fold_masks_s in fold_eqv
      tcg/optimize: Use fold_masks_z in fold_extract
      tcg/optimize: Use finish_folding in fold_extract2
      tcg/optimize: Use fold_masks_zs in fold_exts
      tcg/optimize: Use fold_masks_z in fold_extu
      tcg/optimize: Use fold_masks_zs in fold_movcond
      tcg/optimize: Use finish_folding in fold_mul*
      tcg/optimize: Use fold_masks_s in fold_nand
      tcg/optimize: Use fold_masks_z in fold_neg_no_const
      tcg/optimize: Use fold_masks_s in fold_nor
      tcg/optimize: Use fold_masks_s in fold_not
      tcg/optimize: Use fold_masks_zs in fold_or
      tcg/optimize: Use fold_masks_zs in fold_orc
      tcg/optimize: Use fold_masks_zs in fold_qemu_ld
      tcg/optimize: Return true from fold_qemu_st, fold_tcg_st
      tcg/optimize: Use finish_folding in fold_remainder
      tcg/optimize: Distinguish simplification in fold_setcond_zmask
      tcg/optimize: Use fold_masks_z in fold_setcond
      tcg/optimize: Use fold_masks_s in fold_negsetcond
      tcg/optimize: Use fold_masks_z in fold_setcond2
      tcg/optimize: Use finish_folding in fold_cmp_vec
      tcg/optimize: Use finish_folding in fold_cmpsel_vec
      tcg/optimize: Use fold_masks_zs in fold_sextract
      tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
      tcg/optimize: Simplify sign bit test in fold_shift
      tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec
      tcg/optimize: Use fold_masks_zs in fold_tcg_ld
      tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy
      tcg/optimize: Use fold_masks_zs in fold_xor
      tcg/optimize: Use finish_folding in fold_bitsel_vec
      tcg/optimize: Use finish_folding as default in tcg_optimize
      tcg/optimize: Remove z_mask, s_mask from OptContext
      tcg/optimize: Re-enable sign-mask optimizations
      tcg/optimize: Move fold_bitsel_vec into alphabetic sort
      tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
      softfloat: Add float{16,32,64}_muladd_scalbn
      target/arm: Use float*_muladd_scalbn
      target/sparc: Use float*_muladd_scalbn
      softfloat: Remove float_muladd_halve_result
      softfloat: Add float_round_nearest_even_max
      softfloat: Add float_muladd_suppress_add_product_zero
      target/hexagon: Use float32_mul in helper_sfmpy
      target/hexagon: Use float32_muladd for helper_sffma
      target/hexagon: Use float32_muladd for helper_sffms
      target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc
      target/hexagon: Use float32_muladd for helper_sffm[as]_lib
      target/hexagon: Remove internal_fmafx
      target/hexagon: Expand GEN_XF_ROUND
      target/hexagon: Remove Float
      target/hexagon: Remove Double
      target/hexagon: Use mulu64 for int128_mul_6464
      target/hexagon: Simplify internal_mpyhh setup
      accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

 include/exec/translator.h           |  14 -
 include/fpu/softfloat-types.h       |   2 +
 include/fpu/softfloat.h             |  14 +-
 include/hw/core/tcg-cpu-ops.h       |  13 +
 target/alpha/cpu.h                  |   2 +
 target/arm/internals.h              |   2 +
 target/avr/cpu.h                    |   2 +
 target/hexagon/cpu.h                |   2 +
 target/hexagon/fma_emu.h            |   3 -
 target/hppa/cpu.h                   |   2 +
 target/i386/tcg/helper-tcg.h        |   2 +
 target/loongarch/internals.h        |   2 +
 target/m68k/cpu.h                   |   2 +
 target/microblaze/cpu.h             |   2 +
 target/mips/tcg/tcg-internal.h      |   2 +
 target/openrisc/cpu.h               |   2 +
 target/ppc/cpu.h                    |   2 +
 target/riscv/cpu.h                  |   3 +
 target/rx/cpu.h                     |   2 +
 target/s390x/s390x-internal.h       |   2 +
 target/sh4/cpu.h                    |   2 +
 target/sparc/cpu.h                  |   2 +
 target/sparc/helper.h               |   4 +-
 target/tricore/cpu.h                |   2 +
 target/xtensa/cpu.h                 |   2 +
 accel/tcg/cpu-exec.c                |   8 +-
 accel/tcg/plugin-gen.c              |   9 +
 accel/tcg/translate-all.c           |   8 +-
 fpu/softfloat.c                     |  63 +--
 target/alpha/cpu.c                  |   1 +
 target/alpha/translate.c            |   4 +-
 target/arm/cpu.c                    |   1 +
 target/arm/tcg/cpu-v7m.c            |   1 +
 target/arm/tcg/helper-a64.c         |   6 +-
 target/arm/tcg/translate.c          |   5 +-
 target/avr/cpu.c                    |   1 +
 target/avr/translate.c              |   6 +-
 target/hexagon/cpu.c                |   1 +
 target/hexagon/fma_emu.c            | 496 ++++++---------------
 target/hexagon/op_helper.c          | 125 ++----
 target/hexagon/translate.c          |   4 +-
 target/hppa/cpu.c                   |   1 +
 target/hppa/translate.c             |   4 +-
 target/i386/tcg/tcg-cpu.c           |   1 +
 target/i386/tcg/translate.c         |   5 +-
 target/loongarch/cpu.c              |   1 +
 target/loongarch/tcg/translate.c    |   4 +-
 target/m68k/cpu.c                   |   1 +
 target/m68k/translate.c             |   4 +-
 target/microblaze/cpu.c             |   1 +
 target/microblaze/translate.c       |   4 +-
 target/mips/cpu.c                   |   1 +
 target/mips/tcg/translate.c         |   4 +-
 target/openrisc/cpu.c               |   1 +
 target/openrisc/translate.c         |   4 +-
 target/ppc/cpu_init.c               |   1 +
 target/ppc/translate.c              |   4 +-
 target/riscv/tcg/tcg-cpu.c          |   1 +
 target/riscv/translate.c            |   4 +-
 target/rx/cpu.c                     |   1 +
 target/rx/translate.c               |   4 +-
 target/s390x/cpu.c                  |   1 +
 target/s390x/tcg/translate.c        |   4 +-
 target/sh4/cpu.c                    |   1 +
 target/sh4/translate.c              |   4 +-
 target/sparc/cpu.c                  |   1 +
 target/sparc/fop_helper.c           |   8 +-
 target/sparc/translate.c            |  84 ++--
 target/tricore/cpu.c                |   1 +
 target/tricore/translate.c          |   5 +-
 target/xtensa/cpu.c                 |   1 +
 target/xtensa/translate.c           |   4 +-
 tcg/optimize.c                      | 857 +++++++++++++++++++-----------------
 tests/tcg/multiarch/system/memory.c |   9 +-
 fpu/softfloat-parts.c.inc           |  16 +-
 75 files changed, 866 insertions(+), 1009 deletions(-)

From: Ilya Leoshkevich <iii@linux.ibm.com>

make check-tcg fails on Fedora with the following error message:

  alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
  qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
     17 | #include <inttypes.h>
        |          ^~~~~~~~~~~~
  compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tests/tcg/multiarch/system/memory.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -XXX,XX +XXX,XX @@
 
 #include <stdint.h>
 #include <stdbool.h>
-#include <inttypes.h>
 #include <minilib.h>
 
 #ifndef CHECK_UNALIGNED
@@ -XXX,XX +XXX,XX @@ int main(void)
     int i;
     bool ok = true;
 
-    ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
-    ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+    ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+    ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);
 
     /* Run through the unsigned tests first */
     for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -XXX,XX +XXX,XX @@ int main(void)
         ok = do_signed_reads(true);
     }
 
-    ml_printf("Test data read: %"PRId32"\n", test_read_count);
-    ml_printf("Test data write: %"PRId32"\n", test_write_count);
+    ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+    ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
     ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
     return ok ? 0 : -1;
 }
-- 
2.43.0

From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
 accel/tcg/plugin-gen.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void gen_disable_mem_helper(void)
 
 static TCGv_i32 gen_cpu_index(void)
 {
+    /*
+     * Optimize when we run with a single vcpu. All values using cpu_index,
+     * including scoreboard index, will be optimized out.
+     * User-mode calls tb_flush when setting this flag. In system-mode, all
+     * vcpus are created before generating code.
+     */
+    if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+        return tcg_constant_i32(current_cpu->cpu_index);
+    }
     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
     tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
-- 
2.43.0

Call them directly from the opcode switch statement in tcg_optimize,
rather than in finish_folding based on opcode flags. Adjust folding
of conditional branches to match.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
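For context (an explanatory note, not part of the patch): a basic block
ends at every branch, while an extended basic block continues across the
fall-through edge of a conditional branch and ends only at labels and
unconditional control transfers. Accordingly, finish_bb() only forgets
the pending memory barrier, while finish_ebb() additionally discards all
known temp values and memory copies.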
 tcg/optimize.c | 47 +++++++++++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
     }
 }
 
+static void finish_bb(OptContext *ctx)
+{
+    /* We only optimize memory barriers across basic blocks. */
+    ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+    finish_bb(ctx);
+    /* We only optimize across extended basic blocks. */
+    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+    remove_mem_copy_all(ctx);
+}
+
 static void finish_folding(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     int i, nb_oargs;
 
-    /*
-     * We only optimize extended basic blocks. If the opcode ends a BB
-     * and is not a conditional branch, reset all temp data.
-     */
-    if (def->flags & TCG_OPF_BB_END) {
-        ctx->prev_mb = NULL;
-        if (!(def->flags & TCG_OPF_COND_BRANCH)) {
-            memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
-            remove_mem_copy_all(ctx);
-        }
-        return;
-    }
-
     nb_oargs = def->nb_oargs;
     for (i = 0; i < nb_oargs; i++) {
         TCGTemp *ts = arg_temp(op->args[i]);
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
     if (i > 0) {
         op->opc = INDEX_op_br;
         op->args[0] = op->args[3];
+        finish_ebb(ctx);
+    } else {
+        finish_bb(ctx);
     }
-    return false;
+    return true;
 }
 
 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
     }
     op->opc = INDEX_op_br;
     op->args[0] = label;
-    break;
+    finish_ebb(ctx);
+    return true;
     }
-    return false;
+
+    finish_bb(ctx);
+    return true;
 }
 
 static bool fold_bswap(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(xor):
             done = fold_xor(&ctx, op);
             break;
+        case INDEX_op_set_label:
+        case INDEX_op_br:
+        case INDEX_op_exit_tb:
+        case INDEX_op_goto_tb:
+        case INDEX_op_goto_ptr:
+            finish_ebb(&ctx);
+            done = true;
+            break;
         default:
             break;
         }
-- 
2.43.0

New patch
1
There are only a few logical operations which can compute
2
an "affected" mask. Split out handling of this optimization
3
to a separate function, only to be called when applicable.
1
4
5
Remove the a_mask field from OptContext, as the mask is
6
no longer stored anywhere.
7
8
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
12
1 file changed, 27 insertions(+), 15 deletions(-)
13
14
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/optimize.c
17
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
19
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
20
21
/* In flight values from optimization. */
22
- uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
23
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
24
uint64_t s_mask; /* mask of clrsb(value) bits */
25
TCGType type;
26
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
27
28
static bool fold_masks(OptContext *ctx, TCGOp *op)
29
{
30
- uint64_t a_mask = ctx->a_mask;
31
uint64_t z_mask = ctx->z_mask;
32
uint64_t s_mask = ctx->s_mask;
33
34
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
35
* type changing opcodes.
36
*/
37
if (ctx->type == TCG_TYPE_I32) {
38
- a_mask = (int32_t)a_mask;
39
z_mask = (int32_t)z_mask;
40
s_mask |= MAKE_64BIT_MASK(32, 32);
41
ctx->z_mask = z_mask;
42
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
43
if (z_mask == 0) {
44
return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
45
}
46
+ return false;
47
+}
48
+
49
+/*
50
+ * An "affected" mask bit is 0 if and only if the result is identical
51
+ * to the first input. Thus if the entire mask is 0, the operation
52
+ * is equivalent to a copy.
53
+ */
54
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
55
+{
56
+ if (ctx->type == TCG_TYPE_I32) {
57
+ a_mask = (uint32_t)a_mask;
58
+ }
59
if (a_mask == 0) {
60
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
61
}
62
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
63
* Known-zeros does not imply known-ones. Therefore unless
64
* arg2 is constant, we can't infer affected bits from it.
65
*/
66
- if (arg_is_const(op->args[2])) {
67
- ctx->a_mask = z1 & ~z2;
68
+ if (arg_is_const(op->args[2]) &&
69
+ fold_affected_mask(ctx, op, z1 & ~z2)) {
70
+ return true;
71
}
72
73
return fold_masks(ctx, op);
74
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
75
*/
76
if (arg_is_const(op->args[2])) {
77
uint64_t z2 = ~arg_info(op->args[2])->z_mask;
78
- ctx->a_mask = z1 & ~z2;
79
+ if (fold_affected_mask(ctx, op, z1 & ~z2)) {
80
+ return true;
81
+ }
82
z1 &= z2;
83
}
84
ctx->z_mask = z1;
85
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
86
87
z_mask_old = arg_info(op->args[1])->z_mask;
88
z_mask = extract64(z_mask_old, pos, len);
89
- if (pos == 0) {
90
- ctx->a_mask = z_mask_old ^ z_mask;
91
+ if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
92
+ return true;
93
}
94
ctx->z_mask = z_mask;
95
ctx->s_mask = smask_from_zmask(z_mask);
96
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
97
98
ctx->z_mask = z_mask;
99
ctx->s_mask = s_mask;
100
- if (!type_change) {
101
- ctx->a_mask = s_mask & ~s_mask_old;
102
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
103
+ return true;
104
}
105
106
return fold_masks(ctx, op);
107
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
108
109
ctx->z_mask = z_mask;
110
ctx->s_mask = smask_from_zmask(z_mask);
111
- if (!type_change) {
112
- ctx->a_mask = z_mask_old ^ z_mask;
113
+ if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
114
+ return true;
115
}
116
return fold_masks(ctx, op);
117
}
118
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
119
s_mask |= MAKE_64BIT_MASK(len, 64 - len);
120
ctx->s_mask = s_mask;
121
122
- if (pos == 0) {
123
- ctx->a_mask = s_mask & ~s_mask_old;
124
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
125
+ return true;
126
}
127
128
return fold_masks(ctx, op);
129
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
130
}
131
132
/* Assume all bits affected, no bits known zero, no sign reps. */
133
- ctx.a_mask = -1;
134
ctx.z_mask = -1;
135
ctx.s_mask = 0;
136
137
--
138
2.43.0
Use of fold_masks should be restricted to those opcodes that
can reliably make use of it -- those with a single output,
and from higher-level folders that set up the masks.
Prepare for conversion of each folder in turn.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
+    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    TCGTemp *ts;
+    TempOptInfo *ti;
+
+    /* Only single-output opcodes are supported here. */
+    tcg_debug_assert(def->nb_oargs == 1);
 
     /*
      * 32-bit ops generate 32-bit results, which for the purpose of
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
-        ctx->z_mask = z_mask;
-        ctx->s_mask = s_mask;
     }
 
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
-    return false;
+
+    ts = arg_temp(op->args[0]);
+    reset_ts(ctx, ts);
+
+    ti = ts_info(ts);
+    ti->z_mask = z_mask;
+    ti->s_mask = s_mask;
+    return true;
 }
 
 /*
-- 
2.43.0

Add a routine to which masks can be passed directly, rather than
storing them into OptContext. To be used in upcoming patches.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
-static bool fold_masks(OptContext *ctx, TCGOp *op)
+/*
+ * Record "zero" and "sign" masks for the single output of @op.
+ * See TempOptInfo definition of z_mask and s_mask.
+ * If z_mask allows, fold the output to constant zero.
+ */
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
+                          uint64_t z_mask, uint64_t s_mask)
 {
-    uint64_t z_mask = ctx->z_mask;
-    uint64_t s_mask = ctx->s_mask;
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+    return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
+}
+
 /*
  * An "affected" mask bit is 0 if and only if the result is identical
  * to the first input. Thus if the entire mask is 0, the operation
-- 
2.43.0

Consider the passed s_mask to be a minimum deduced from
either existing s_mask or from a sign-extension operation.
We may be able to deduce more from the set of known zeros.
Remove identical logic from several opcode folders.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
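As an illustration of the effect (numbers not taken from the patch): a
folder that only establishes a zero mask now gets sign information for
free. After a 16-bit zero-extending operation, for instance, z_mask =
0xffff, and smask_from_zmask(0xffff) marks bits 63..17 as sign-bit
repetitions (at least 47 of them), which fold_masks_zs now ORs into the
recorded s_mask even when the caller passes s_mask = 0.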
 tcg/optimize.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * Record "zero" and "sign" masks for the single output of @op.
  * See TempOptInfo definition of z_mask and s_mask.
  * If z_mask allows, fold the output to constant zero.
+ * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                           uint64_t z_mask, uint64_t s_mask)
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 
     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask;
+    ti->s_mask = s_mask | smask_from_zmask(z_mask);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    s_mask = smask_from_zmask(z_mask);
 
+    s_mask = 0;
     switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
     case TCG_BSWAP_OZ:
         break;
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         /* The high bits are undefined: force all bits above the sign to 1. */
         z_mask |= sign << 1;
-        s_mask = 0;
         break;
     }
     ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
     ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
         return true;
     }
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
 
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
     if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
     int width = 8 * memop_size(mop);
 
     if (width < 64) {
-        ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
-        if (!(mop & MO_SIGN)) {
+        if (mop & MO_SIGN) {
+            ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+        } else {
             ctx->z_mask = MAKE_64BIT_MASK(0, width);
-            ctx->s_mask <<= 1;
         }
     }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
     fold_setcond_tst_pow2(ctx, op, false);
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 
 do_setcond_const:
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
         break;
     CASE_OP_32_64(ld8u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 8);
-        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
         break;
     CASE_OP_32_64(ld16s):
         ctx->s_mask = MAKE_64BIT_MASK(16, 48);
         break;
     CASE_OP_32_64(ld16u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 16);
-        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
         break;
     case INDEX_op_ld32s_i64:
         ctx->s_mask = MAKE_64BIT_MASK(32, 32);
         break;
     case INDEX_op_ld32u_i64:
         ctx->z_mask = MAKE_64BIT_MASK(0, 32);
-        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
         break;
     default:
         g_assert_not_reached();
-- 
2.43.0

Change the representation from sign bit repetitions to all bits equal
to the sign bit, including the sign bit itself.

The previous format has a problem in that it is difficult to recreate
a valid sign mask after a shift operation: the "repetitions" part of
the previous format meant that applying the same shift as for the value
leads to an off-by-one value.

The new format, including the sign bit itself, means that the sign mask
can be manipulated in exactly the same way as the value, so
canonicalization is easier.

Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
to do so. Treat 0 as a non-canonical but typeless input for no sign
information, which will be reset as appropriate for the data type.
We can easily fold in the data from z_mask while canonicalizing.

Temporarily disable optimizations using s_mask while each operation is
converted to use fold_masks_zs and to the new form.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
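A concrete illustration of the two encodings (example value, not from
the patch): for the constant 0xffffffffffffff80, clrsb64() is 56, so
the old format stored the repetitions as 0xffffffffffffff00, while the
new format, which includes the sign bit itself, stores
INT64_MIN >> 56 = 0xffffffffffffff80. Because the new mask has the same
shape as the value, an arithmetic right shift applied to the value can
be applied unchanged to the mask, avoiding the off-by-one adjustment
described above.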
 tcg/optimize.c | 64 ++++++++++++--------------------------------
 1 file changed, 15 insertions(+), 49 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
     uint64_t val;
     uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
-    uint64_t s_mask;  /* a left-aligned mask of clrsb(value) bits. */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
 } TempOptInfo;
 
 typedef struct OptContext {
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
 
     /* In flight values from optimization. */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
-    uint64_t s_mask;  /* mask of clrsb(value) bits */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
     TCGType type;
 } OptContext;
 
-/* Calculate the smask for a specific value. */
-static uint64_t smask_from_value(uint64_t value)
-{
-    int rep = clrsb64(value);
-    return ~(~0ull >> rep);
-}
-
-/*
- * Calculate the smask for a given set of known-zeros.
- * If there are lots of zeros on the left, we can consider the remainder
- * an unsigned field, and thus the corresponding signed field is one bit
- * larger.
- */
-static uint64_t smask_from_zmask(uint64_t zmask)
-{
-    /*
-     * Only the 0 bits are significant for zmask, thus the msb itself
-     * must be zero, else we have no sign information.
-     */
-    int rep = clz64(zmask);
-    if (rep == 0) {
-        return 0;
-    }
-    rep -= 1;
-    return ~(~0ull >> rep);
-}
-
-/*
- * Recreate a properly left-aligned smask after manipulation.
- * Some bit-shuffling, particularly shifts and rotates, may
- * retain sign bits on the left, but may scatter disconnected
- * sign bits on the right. Retain only what remains to the left.
- */
-static uint64_t smask_from_smask(int64_t smask)
-{
-    /* Only the 1 bits are significant for smask */
-    return smask_from_zmask(~smask);
-}
-
 static inline TempOptInfo *ts_info(TCGTemp *ts)
 {
     return ts->state_ptr;
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
         ti->is_const = true;
         ti->val = ts->val;
         ti->z_mask = ts->val;
-        ti->s_mask = smask_from_value(ts->val);
+        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
     } else {
         ti->is_const = false;
         ti->z_mask = -1;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
         */
        if (i == 0) {
            ts_info(ts)->z_mask = ctx->z_mask;
-           ts_info(ts)->s_mask = ctx->s_mask;
        }
     }
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
-                          uint64_t z_mask, uint64_t s_mask)
+                          uint64_t z_mask, int64_t s_mask)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
+    int rep;
 
     /* Only single-output opcodes are supported here. */
     tcg_debug_assert(def->nb_oargs == 1);
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
      */
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
-        s_mask |= MAKE_64BIT_MASK(32, 32);
+        s_mask |= INT32_MIN;
     }
 
     if (z_mask == 0) {
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 
     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask | smask_from_zmask(z_mask);
+
+    /* Canonicalize s_mask and incorporate data from z_mask. */
+    rep = clz64(~s_mask);
+    rep = MAX(rep, clz64(z_mask));
+    rep = MAX(rep - 1, 0);
+    ti->s_mask = INT64_MIN >> rep;
+
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;
 
-    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
         ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
 
         s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
-        ctx->s_mask = smask_from_smask(s_mask);
 
         return fold_masks(ctx, op);
     }
-- 
2.43.0

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void finish_ebb(OptContext *ctx)
     remove_mem_copy_all(ctx);
 }
 
-static void finish_folding(OptContext *ctx, TCGOp *op)
+static bool finish_folding(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     int i, nb_oargs;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
             ts_info(ts)->z_mask = ctx->z_mask;
         }
     }
+    return true;
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 0)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 /* We cannot as yet do_constant_folding with vectors. */
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 0)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
         op->args[4] = arg_new_constant(ctx, bl);
         op->args[5] = arg_new_constant(ctx, bh);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_add2(OptContext *ctx, TCGOp *op)
-- 
56
2.43.0
diff view generated by jsdifflib
1
Introduce ti_is_const, ti_const_val, ti_is_const_val.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static inline TempOptInfo *arg_info(TCGArg arg)
     return ts_info(arg_temp(arg));
 }
 
+static inline bool ti_is_const(TempOptInfo *ti)
+{
+    return ti->is_const;
+}
+
+static inline uint64_t ti_const_val(TempOptInfo *ti)
+{
+    return ti->val;
+}
+
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
+{
+    return ti_is_const(ti) && ti_const_val(ti) == val;
+}
+
 static inline bool ts_is_const(TCGTemp *ts)
 {
-    return ts_info(ts)->is_const;
+    return ti_is_const(ts_info(ts));
 }
 
 static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
 {
-    TempOptInfo *ti = ts_info(ts);
-    return ti->is_const && ti->val == val;
+    return ti_is_const_val(ts_info(ts), val);
 }
 
 static inline bool arg_is_const(TCGArg arg)
-- 
2.43.0

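As a usage illustration (not part of the series; the helper below is hypothetical, and only TempOptInfo, arg_info() and the new ti_* accessors come from the patch), the accessors let a fold routine query an operand's constant state through a single TempOptInfo lookup:

/* Hypothetical sketch in the style of tcg/optimize.c, assuming the usual
 * OptContext/TCGOp environment of that file. */
static bool example_fold_xi(OptContext *ctx, TCGOp *op)
{
    TempOptInfo *t2 = arg_info(op->args[2]);

    /* Fetch the operand's TempOptInfo once, then test and read it. */
    if (ti_is_const(t2) && ti_const_val(t2) == 0) {
        /* Example rule only: "x OP 0" copies the first input. */
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}
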
Avoid the use of the OptContext slots. Find TempOptInfo once.
Sink mask computation below fold_affected_mask early exit.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_add2(OptContext *ctx, TCGOp *op)
 
 static bool fold_and(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z1, z2;
+    uint64_t z1, z2, z_mask, s_mask;
+    TempOptInfo *t1, *t2;
 
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_i(ctx, op, 0) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
         return true;
     }
 
-    z1 = arg_info(op->args[1])->z_mask;
-    z2 = arg_info(op->args[2])->z_mask;
-    ctx->z_mask = z1 & z2;
-
-    /*
-     * Sign repetitions are perforce all identical, whether they are 1 or 0.
-     * Bitwise operations preserve the relative quantity of the repetitions.
-     */
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
+    t1 = arg_info(op->args[1]);
+    t2 = arg_info(op->args[2]);
+    z1 = t1->z_mask;
+    z2 = t2->z_mask;
 
     /*
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
      */
-    if (arg_is_const(op->args[2]) &&
-        fold_affected_mask(ctx, op, z1 & ~z2)) {
+    if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
         return true;
     }
 
-    return fold_masks(ctx, op);
+    z_mask = z1 & z2;
+
+    /*
+     * Sign repetitions are perforce all identical, whether they are 1 or 0.
+     * Bitwise operations preserve the relative quantity of the repetitions.
+     */
+    s_mask = t1->s_mask & t2->s_mask;
+
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_andc(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Avoid the use of the OptContext slots. Find TempOptInfo once.
Avoid double inversion of the value of second const operand.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
 
 static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z1;
+    uint64_t z_mask, s_mask;
+    TempOptInfo *t1, *t2;
 
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, 0) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
         return true;
     }
 
-    z1 = arg_info(op->args[1])->z_mask;
+    t1 = arg_info(op->args[1]);
+    t2 = arg_info(op->args[2]);
+    z_mask = t1->z_mask;
 
     /*
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer anything from it.
      */
-    if (arg_is_const(op->args[2])) {
-        uint64_t z2 = ~arg_info(op->args[2])->z_mask;
-        if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+    if (ti_is_const(t2)) {
+        uint64_t v2 = ti_const_val(t2);
+        if (fold_affected_mask(ctx, op, z_mask & v2)) {
             return true;
         }
-        z1 &= z2;
+        z_mask &= ~v2;
     }
-    ctx->z_mask = z1;
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return fold_masks(ctx, op);
+    s_mask = t1->s_mask & t2->s_mask;
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_brcond(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Avoid the use of the OptContext slots. Find TempOptInfo once.
Always set s_mask along the BSWAP_OS path, since the result is
being explicitly sign-extended.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
 static bool fold_bswap(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask, s_mask, sign;
+    TempOptInfo *t1 = arg_info(op->args[1]);
 
-    if (arg_is_const(op->args[1])) {
-        uint64_t t = arg_info(op->args[1])->val;
-
-        t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
-        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    if (ti_is_const(t1)) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0],
+                                do_constant_folding(op->opc, ctx->type,
+                                                    ti_const_val(t1),
+                                                    op->args[2]));
     }
 
-    z_mask = arg_info(op->args[1])->z_mask;
-
+    z_mask = t1->z_mask;
     switch (op->opc) {
     case INDEX_op_bswap16_i32:
     case INDEX_op_bswap16_i64:
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
         /* If the sign bit may be 1, force all the bits above to 1. */
         if (z_mask & sign) {
             z_mask |= sign;
-            s_mask = sign << 1;
         }
+        /* The value and therefore s_mask is explicitly sign-extended. */
+        s_mask = sign;
         break;
     default:
         /* The high bits are undefined: force all bits above the sign to 1. */
         z_mask |= sign << 1;
         break;
     }
-    ctx->z_mask = z_mask;
-    ctx->s_mask = s_mask;
 
-    return fold_masks(ctx, op);
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_call(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Avoid the use of the OptContext slots. Find TempOptInfo once.
Compute s_mask from the union of the maximum count and the
op2 fallback for op1 being zero.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
 
 static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z_mask;
+    uint64_t z_mask, s_mask;
+    TempOptInfo *t1 = arg_info(op->args[1]);
+    TempOptInfo *t2 = arg_info(op->args[2]);
 
-    if (arg_is_const(op->args[1])) {
-        uint64_t t = arg_info(op->args[1])->val;
+    if (ti_is_const(t1)) {
+        uint64_t t = ti_const_val(t1);
 
         if (t != 0) {
             t = do_constant_folding(op->opc, ctx->type, t, 0);
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-    return false;
+    s_mask = ~z_mask;
+    z_mask |= t2->z_mask;
+    s_mask &= t2->s_mask;
+
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_ctpop(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Add fold_masks_z as a trivial wrapper around fold_masks_zs.
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
     return true;
 }
 
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
+{
+    return fold_masks_zs(ctx, op, z_mask, 0);
+}
+
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
 
 static bool fold_ctpop(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask;
+
     if (fold_const1(ctx, op)) {
         return true;
     }
 
     switch (ctx->type) {
     case TCG_TYPE_I32:
-        ctx->z_mask = 32 | 31;
+        z_mask = 32 | 31;
         break;
     case TCG_TYPE_I64:
-        ctx->z_mask = 64 | 63;
+        z_mask = 64 | 63;
         break;
     default:
         g_assert_not_reached();
     }
-    return false;
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_deposit(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Avoid the use of the OptContext slots. Find TempOptInfo once.
When we fold to and, use fold_and.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 35 +++++++++++++++++------------------
 1 file changed, 17 insertions(+), 18 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
 
 static bool fold_deposit(OptContext *ctx, TCGOp *op)
 {
+    TempOptInfo *t1 = arg_info(op->args[1]);
+    TempOptInfo *t2 = arg_info(op->args[2]);
+    int ofs = op->args[3];
+    int len = op->args[4];
     TCGOpcode and_opc;
+    uint64_t z_mask;
 
-    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-        uint64_t t1 = arg_info(op->args[1])->val;
-        uint64_t t2 = arg_info(op->args[2])->val;
-
-        t1 = deposit64(t1, op->args[3], op->args[4], t2);
-        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
+    if (ti_is_const(t1) && ti_is_const(t2)) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0],
+                                deposit64(ti_const_val(t1), ofs, len,
+                                          ti_const_val(t2)));
     }
 
     switch (ctx->type) {
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
     }
 
     /* Inserting a value into zero at offset 0. */
-    if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
-        uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
+    if (ti_is_const_val(t1, 0) && ofs == 0) {
+        uint64_t mask = MAKE_64BIT_MASK(0, len);
 
         op->opc = and_opc;
         op->args[1] = op->args[2];
         op->args[2] = arg_new_constant(ctx, mask);
-        ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
-        return false;
+        return fold_and(ctx, op);
     }
 
     /* Inserting zero into a value. */
-    if (arg_is_const_val(op->args[2], 0)) {
-        uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
+    if (ti_is_const_val(t2, 0)) {
+        uint64_t mask = deposit64(-1, ofs, len, 0);
 
         op->opc = and_opc;
         op->args[2] = arg_new_constant(ctx, mask);
-        ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
-        return false;
+        return fold_and(ctx, op);
     }
 
-    ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
-                            op->args[3], op->args[4],
-                            arg_info(op->args[2])->z_mask);
-    return false;
+    z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_divide(OptContext *ctx, TCGOp *op)
-- 
2.43.0

The input which overlaps the sign bit of the output can
have its input s_mask propagated to the output s_mask.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
     TempOptInfo *t2 = arg_info(op->args[2]);
     int ofs = op->args[3];
     int len = op->args[4];
+    int width;
     TCGOpcode and_opc;
-    uint64_t z_mask;
+    uint64_t z_mask, s_mask;
 
     if (ti_is_const(t1) && ti_is_const(t2)) {
         return tcg_opt_gen_movi(ctx, op, op->args[0],
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
     switch (ctx->type) {
     case TCG_TYPE_I32:
         and_opc = INDEX_op_and_i32;
+        width = 32;
         break;
     case TCG_TYPE_I64:
         and_opc = INDEX_op_and_i64;
+        width = 64;
         break;
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
         return fold_and(ctx, op);
     }
 
+    /* The s_mask from the top portion of the deposit is still valid. */
+    if (ofs + len == width) {
+        s_mask = t2->s_mask << ofs;
+    } else {
+        s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
+    }
+
     z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
-    return fold_masks_z(ctx, op, z_mask);
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_divide(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 1)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_dup(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
         t = dup_const(TCGOP_VECE(op), t);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_dup2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
         op->opc = INDEX_op_dup_vec;
         TCGOP_VECE(op) = MO_32;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Add fold_masks_s as a trivial wrapper around fold_masks_zs.
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
     return fold_masks_zs(ctx, op, z_mask, 0);
 }
 
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
+{
+    return fold_masks_zs(ctx, op, -1, s_mask);
+}
+
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
 
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask;
+
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_x(ctx, op, -1) ||
         fold_xi_to_not(ctx, op, 0)) {
         return true;
     }
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return false;
+    s_mask = arg_info(op->args[1])->s_mask
+           & arg_info(op->args[2])->s_mask;
+    return fold_masks_s(ctx, op, s_mask);
 }
 
 static bool fold_extract(OptContext *ctx, TCGOp *op)
-- 
2.43.0

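Together with the fold_masks_z wrapper added earlier in the series, this gives a small family of helpers around fold_masks_zs. A condensed, purely illustrative sketch of the calling pattern (the function below is hypothetical; it mirrors what fold_or does for an OR, and the mask glosses are informal):

/* Hypothetical sketch in the style of tcg/optimize.c. */
static bool example_fold_or_like(OptContext *ctx, TCGOp *op)
{
    TempOptInfo *t1 = arg_info(op->args[1]);
    TempOptInfo *t2 = arg_info(op->args[2]);

    /* z_mask: bits that may still be nonzero in the result. */
    uint64_t z_mask = t1->z_mask | t2->z_mask;
    /* s_mask: bits known to repeat the sign bit, per this series. */
    uint64_t s_mask = t1->s_mask & t2->s_mask;

    /* One call replaces the old stores to ctx->z_mask / ctx->s_mask. */
    return fold_masks_zs(ctx, op, z_mask, s_mask);
}
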
Avoid the use of the OptContext slots. Find TempOptInfo once.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
 static bool fold_extract(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask_old, z_mask;
+    TempOptInfo *t1 = arg_info(op->args[1]);
     int pos = op->args[2];
     int len = op->args[3];
 
-    if (arg_is_const(op->args[1])) {
-        uint64_t t;
-
-        t = arg_info(op->args[1])->val;
-        t = extract64(t, pos, len);
-        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    if (ti_is_const(t1)) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0],
+                                extract64(ti_const_val(t1), pos, len));
     }
 
-    z_mask_old = arg_info(op->args[1])->z_mask;
+    z_mask_old = t1->z_mask;
     z_mask = extract64(z_mask_old, pos, len);
     if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
-    ctx->z_mask = z_mask;
 
-    return fold_masks(ctx, op);
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_extract2(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
         }
         return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_exts(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Avoid the use of the OptContext slots. Find TempOptInfo once.
Explicitly sign-extend z_mask instead of doing that manually.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 29 ++++++++++++-----------------
 1 file changed, 12 insertions(+), 17 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
 
 static bool fold_exts(OptContext *ctx, TCGOp *op)
 {
-    uint64_t s_mask_old, s_mask, z_mask, sign;
+    uint64_t s_mask_old, s_mask, z_mask;
     bool type_change = false;
+    TempOptInfo *t1;
 
     if (fold_const1(ctx, op)) {
         return true;
     }
 
-    z_mask = arg_info(op->args[1])->z_mask;
-    s_mask = arg_info(op->args[1])->s_mask;
+    t1 = arg_info(op->args[1]);
+    z_mask = t1->z_mask;
+    s_mask = t1->s_mask;
     s_mask_old = s_mask;
 
     switch (op->opc) {
     CASE_OP_32_64(ext8s):
-        sign = INT8_MIN;
-        z_mask = (uint8_t)z_mask;
+        s_mask |= INT8_MIN;
+        z_mask = (int8_t)z_mask;
         break;
     CASE_OP_32_64(ext16s):
-        sign = INT16_MIN;
-        z_mask = (uint16_t)z_mask;
+        s_mask |= INT16_MIN;
+        z_mask = (int16_t)z_mask;
         break;
     case INDEX_op_ext_i32_i64:
         type_change = true;
         QEMU_FALLTHROUGH;
     case INDEX_op_ext32s_i64:
-        sign = INT32_MIN;
-        z_mask = (uint32_t)z_mask;
+        s_mask |= INT32_MIN;
+        z_mask = (int32_t)z_mask;
         break;
     default:
         g_assert_not_reached();
     }
 
-    if (z_mask & sign) {
-        z_mask |= sign;
-    }
-    s_mask |= sign << 1;
-
-    ctx->z_mask = z_mask;
-    ctx->s_mask = s_mask;
     if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }
 
-    return fold_masks(ctx, op);
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_extu(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
 
-    ctx->z_mask = z_mask;
     if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
-    return fold_masks(ctx, op);
+
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_mb(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Avoid the use of the OptContext slots. Find TempOptInfo once.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
 
 static bool fold_movcond(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask, s_mask;
+    TempOptInfo *tt, *ft;
     int i;
 
     /* If true and false values are the same, eliminate the cmp. */
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
     }
 
-    ctx->z_mask = arg_info(op->args[3])->z_mask
-                | arg_info(op->args[4])->z_mask;
-    ctx->s_mask = arg_info(op->args[3])->s_mask
-                & arg_info(op->args[4])->s_mask;
+    tt = arg_info(op->args[3]);
+    ft = arg_info(op->args[4]);
+    z_mask = tt->z_mask | ft->z_mask;
+    s_mask = tt->s_mask & ft->s_mask;
 
-    if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
-        uint64_t tv = arg_info(op->args[3])->val;
-        uint64_t fv = arg_info(op->args[4])->val;
+    if (ti_is_const(tt) && ti_is_const(ft)) {
+        uint64_t tv = ti_const_val(tt);
+        uint64_t fv = ti_const_val(ft);
         TCGOpcode opc, negopc = 0;
         TCGCond cond = op->args[5];
 
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
             }
         }
     }
-    return false;
+
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_mul(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 1)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
         fold_xi_to_i(ctx, op, 0)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_multiply2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
         tcg_opt_gen_movi(ctx, op2, rh, h);
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_nand(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
 
 static bool fold_nand(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask;
+
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return false;
+    s_mask = arg_info(op->args[1])->s_mask
+           & arg_info(op->args[2])->s_mask;
+    return fold_masks_s(ctx, op, s_mask);
 }
 
 static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
 {
     /* Set to 1 all bits to the left of the rightmost. */
     uint64_t z_mask = arg_info(op->args[1])->z_mask;
-    ctx->z_mask = -(z_mask & -z_mask);
+    z_mask = -(z_mask & -z_mask);
 
-    /*
-     * Because of fold_sub_to_neg, we want to always return true,
-     * via finish_folding.
-     */
-    finish_folding(ctx, op);
-    return true;
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_neg(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
 
 static bool fold_nor(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask;
+
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_not(ctx, op, 0)) {
         return true;
     }
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return false;
+    s_mask = arg_info(op->args[1])->s_mask
+           & arg_info(op->args[2])->s_mask;
+    return fold_masks_s(ctx, op, s_mask);
 }
 
 static bool fold_not(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
     if (fold_const1(ctx, op)) {
         return true;
     }
-
-    ctx->s_mask = arg_info(op->args[1])->s_mask;
-
-    /* Because of fold_to_not, we want to always return true, via finish. */
-    finish_folding(ctx, op);
-    return true;
+    return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
 }
 
 static bool fold_or(OptContext *ctx, TCGOp *op)
-- 
2.43.0

Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 ++++++++-----
7
1 file changed, 8 insertions(+), 5 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
15
static bool fold_or(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *t1, *t2;
19
+
20
if (fold_const2_commutative(ctx, op) ||
21
fold_xi_to_x(ctx, op, 0) ||
22
fold_xx_to_x(ctx, op)) {
23
return true;
24
}
25
26
- ctx->z_mask = arg_info(op->args[1])->z_mask
27
- | arg_info(op->args[2])->z_mask;
28
- ctx->s_mask = arg_info(op->args[1])->s_mask
29
- & arg_info(op->args[2])->s_mask;
30
- return fold_masks(ctx, op);
31
+ t1 = arg_info(op->args[1]);
32
+ t2 = arg_info(op->args[2]);
33
+ z_mask = t1->z_mask | t2->z_mask;
34
+ s_mask = t1->s_mask & t2->s_mask;
35
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
36
}
37
38
static bool fold_orc(OptContext *ctx, TCGOp *op)
39
--
40
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
14
15
static bool fold_orc(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2(ctx, op) ||
20
fold_xx_to_i(ctx, op, -1) ||
21
fold_xi_to_x(ctx, op, -1) ||
22
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
23
return true;
24
}
25
26
- ctx->s_mask = arg_info(op->args[1])->s_mask
27
- & arg_info(op->args[2])->s_mask;
28
- return false;
29
+ s_mask = arg_info(op->args[1])->s_mask
30
+ & arg_info(op->args[2])->s_mask;
31
+ return fold_masks_s(ctx, op, s_mask);
32
}
33
34
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
35
--
36
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Be careful not to call fold_masks_zs when the memory operation
4
is wide enough to require multiple outputs, so split into two
5
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.
6
7
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
10
tcg/optimize.c | 26 +++++++++++++++++++++-----
11
1 file changed, 21 insertions(+), 5 deletions(-)
12
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/optimize.c
16
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
18
return fold_masks_s(ctx, op, s_mask);
19
}
20
21
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
22
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
23
{
24
const TCGOpDef *def = &tcg_op_defs[op->opc];
25
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
26
MemOp mop = get_memop(oi);
27
int width = 8 * memop_size(mop);
28
+ uint64_t z_mask = -1, s_mask = 0;
29
30
if (width < 64) {
31
if (mop & MO_SIGN) {
32
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
33
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
34
} else {
35
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
36
+ z_mask = MAKE_64BIT_MASK(0, width);
37
}
38
}
39
40
/* Opcodes that touch guest memory stop the mb optimization. */
41
ctx->prev_mb = NULL;
42
- return false;
43
+
44
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
45
+}
46
+
47
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
48
+{
49
+ /* Opcodes that touch guest memory stop the mb optimization. */
50
+ ctx->prev_mb = NULL;
51
+ return finish_folding(ctx, op);
52
}
53
54
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
55
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
56
break;
57
case INDEX_op_qemu_ld_a32_i32:
58
case INDEX_op_qemu_ld_a64_i32:
59
+ done = fold_qemu_ld_1reg(&ctx, op);
60
+ break;
61
case INDEX_op_qemu_ld_a32_i64:
62
case INDEX_op_qemu_ld_a64_i64:
63
+ if (TCG_TARGET_REG_BITS == 64) {
64
+ done = fold_qemu_ld_1reg(&ctx, op);
65
+ break;
66
+ }
67
+ QEMU_FALLTHROUGH;
68
case INDEX_op_qemu_ld_a32_i128:
69
case INDEX_op_qemu_ld_a64_i128:
70
- done = fold_qemu_ld(&ctx, op);
71
+ done = fold_qemu_ld_2reg(&ctx, op);
72
break;
73
case INDEX_op_qemu_st8_a32_i32:
74
case INDEX_op_qemu_st8_a64_i32:
75
--
76
2.43.0
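
As an aside, the new s_mask expression for sign-extending loads can be evaluated in isolation. The sketch below copies MAKE_64BIT_MASK locally (assumed to match include/qemu/bitops.h) and is illustrative only:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* Local copy for the sketch; assumed to match include/qemu/bitops.h. */
    #define MAKE_64BIT_MASK(shift, length) \
        (((~0ULL) >> (64 - (length))) << (shift))

    int main(void)
    {
        for (int width = 8; width < 64; width *= 2) {
            /* Sign-extending load of 'width' bits: bits width-1..63 all
             * repeat the sign, so together they form the s_mask. */
            uint64_t s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
            printf("width=%2d s_mask=0x%016" PRIx64 "\n", width, s_mask);
        }
        return 0;
    }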
1
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
1
Stores have no output operands, and so need no further work.
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
tcg/sparc64/tcg-target.h | 4 ++--
6
tcg/optimize.c | 11 +++++------
5
tcg/sparc64/tcg-target.c.inc | 40 +++++++++++++++++++++++++++---------
7
1 file changed, 5 insertions(+), 6 deletions(-)
6
2 files changed, 32 insertions(+), 12 deletions(-)
7
8
8
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/sparc64/tcg-target.h
11
--- a/tcg/optimize.c
11
+++ b/tcg/sparc64/tcg-target.h
12
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
13
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
13
#define TCG_TARGET_HAS_sextract_i32 0
14
{
14
#define TCG_TARGET_HAS_extract2_i32 0
15
/* Opcodes that touch guest memory stop the mb optimization. */
15
#define TCG_TARGET_HAS_movcond_i32 1
16
ctx->prev_mb = NULL;
16
-#define TCG_TARGET_HAS_negsetcond_i32 0
17
- return false;
17
+#define TCG_TARGET_HAS_negsetcond_i32 1
18
+ return true;
18
#define TCG_TARGET_HAS_add2_i32 1
19
#define TCG_TARGET_HAS_sub2_i32 1
20
#define TCG_TARGET_HAS_mulu2_i32 1
21
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
22
#define TCG_TARGET_HAS_sextract_i64 0
23
#define TCG_TARGET_HAS_extract2_i64 0
24
#define TCG_TARGET_HAS_movcond_i64 1
25
-#define TCG_TARGET_HAS_negsetcond_i64 0
26
+#define TCG_TARGET_HAS_negsetcond_i64 1
27
#define TCG_TARGET_HAS_add2_i64 1
28
#define TCG_TARGET_HAS_sub2_i64 1
29
#define TCG_TARGET_HAS_mulu2_i64 0
30
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
31
index XXXXXXX..XXXXXXX 100644
32
--- a/tcg/sparc64/tcg-target.c.inc
33
+++ b/tcg/sparc64/tcg-target.c.inc
34
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
35
}
19
}
36
20
37
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
21
static bool fold_remainder(OptContext *ctx, TCGOp *op)
38
- TCGReg c1, int32_t c2, int c2const)
22
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
39
+ TCGReg c1, int32_t c2, int c2const, bool neg)
23
40
{
24
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
41
/* For 32-bit comparisons, we can play games with ADDC/SUBC. */
25
remove_mem_copy_all(ctx);
42
switch (cond) {
26
- return false;
43
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
27
+ return true;
44
default:
45
tcg_out_cmp(s, c1, c2, c2const);
46
tcg_out_movi_s13(s, ret, 0);
47
- tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
48
+ tcg_out_movcc(s, cond, MOVCC_ICC, ret, neg ? -1 : 1, 1);
49
return;
50
}
28
}
51
29
52
tcg_out_cmp(s, c1, c2, c2const);
30
switch (op->opc) {
53
if (cond == TCG_COND_LTU) {
31
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
54
- tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
32
g_assert_not_reached();
55
+ if (neg) {
56
+ /* 0 - 0 - C = -C = (C ? -1 : 0) */
57
+ tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_SUBC);
58
+ } else {
59
+ /* 0 + 0 + C = C = (C ? 1 : 0) */
60
+ tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
61
+ }
62
} else {
63
- tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
64
+ if (neg) {
65
+ /* 0 + -1 + C = C - 1 = (C ? 0 : -1) */
66
+ tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_ADDC);
67
+ } else {
68
+ /* 0 - -1 - C = 1 - C = (C ? 0 : 1) */
69
+ tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
70
+ }
71
}
33
}
34
remove_mem_copy_in(ctx, ofs, ofs + lm1);
35
- return false;
36
+ return true;
72
}
37
}
73
38
74
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
39
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
75
- TCGReg c1, int32_t c2, int c2const)
40
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
76
+ TCGReg c1, int32_t c2, int c2const, bool neg)
41
TCGType type;
77
{
42
78
- if (use_vis3_instructions) {
43
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
79
+ if (use_vis3_instructions && !neg) {
44
- fold_tcg_st(ctx, op);
80
switch (cond) {
45
- return false;
81
case TCG_COND_NE:
46
+ return fold_tcg_st(ctx, op);
82
if (c2 != 0) {
83
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
84
if the input does not overlap the output. */
85
if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
86
tcg_out_movi_s13(s, ret, 0);
87
- tcg_out_movr(s, cond, ret, c1, 1, 1);
88
+ tcg_out_movr(s, cond, ret, c1, neg ? -1 : 1, 1);
89
} else {
90
tcg_out_cmp(s, c1, c2, c2const);
91
tcg_out_movi_s13(s, ret, 0);
92
- tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
93
+ tcg_out_movcc(s, cond, MOVCC_XCC, ret, neg ? -1 : 1, 1);
94
}
47
}
48
49
src = arg_temp(op->args[0]);
50
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
51
last = ofs + tcg_type_size(type) - 1;
52
remove_mem_copy_in(ctx, ofs, last);
53
record_mem_copy(ctx, type, src, ofs, last);
54
- return false;
55
+ return true;
95
}
56
}
96
57
97
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
58
static bool fold_xor(OptContext *ctx, TCGOp *op)
98
tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
99
break;
100
case INDEX_op_setcond_i32:
101
- tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
102
+ tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, false);
103
+ break;
104
+ case INDEX_op_negsetcond_i32:
105
+ tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, true);
106
break;
107
case INDEX_op_movcond_i32:
108
tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
109
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
110
tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
111
break;
112
case INDEX_op_setcond_i64:
113
- tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
114
+ tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, false);
115
+ break;
116
+ case INDEX_op_negsetcond_i64:
117
+ tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, true);
118
break;
119
case INDEX_op_movcond_i64:
120
tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
121
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
122
case INDEX_op_sar_i64:
123
case INDEX_op_setcond_i32:
124
case INDEX_op_setcond_i64:
125
+ case INDEX_op_negsetcond_i32:
126
+ case INDEX_op_negsetcond_i64:
127
return C_O1_I2(r, rZ, rJ);
128
129
case INDEX_op_brcond_i32:
130
--
59
--
131
2.34.1
60
2.43.0
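
The four carry identities quoted in the comments above can be checked with ordinary integer arithmetic; this standalone sketch models the carry flag C as a plain 0/1 value and is not tied to the SPARC encoding:

    #include <stdio.h>

    int main(void)
    {
        for (int c = 0; c <= 1; c++) {
            int subc0  = 0 - 0 - c;   /* (C ? -1 : 0) : negative setcond   */
            int addc0  = 0 + 0 + c;   /* (C ?  1 : 0) : setcond            */
            int addcm1 = 0 + -1 + c;  /* (C ?  0 : -1): inverted, negative */
            int subcm1 = 0 - -1 - c;  /* (C ?  0 :  1): inverted setcond   */
            printf("C=%d -> %2d %2d %2d %2d\n",
                   c, subc0, addc0, addcm1, subcm1);
        }
        return 0;
    }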
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
12
fold_xx_to_i(ctx, op, 0)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
20
--
21
2.43.0
diff view generated by jsdifflib
1
From: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
1
Change return from bool to int; distinguish between
2
complete folding, simplification, and no change.
2
3
3
This unintentionally causes the mov_vec, ld_vec and st_vec operations
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
to appear on the same line.
5
6
Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Message-Id: <20230823141740.35974-1-mark.cave-ayland@ilande.co.uk>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
6
---
11
docs/devel/tcg-ops.rst | 2 ++
7
tcg/optimize.c | 22 ++++++++++++++--------
12
1 file changed, 2 insertions(+)
8
1 file changed, 14 insertions(+), 8 deletions(-)
13
9
14
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
16
--- a/docs/devel/tcg-ops.rst
12
--- a/tcg/optimize.c
17
+++ b/docs/devel/tcg-ops.rst
13
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32.
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
19
.. list-table::
15
return finish_folding(ctx, op);
20
16
}
21
* - mov_vec *v0*, *v1*
17
22
+
18
-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
23
ld_vec *v0*, *t1*
19
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
24
+
20
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
25
st_vec *v0*, *t1*
21
{
26
22
uint64_t a_zmask, b_val;
27
- | Move, load and store.
23
TCGCond cond;
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
25
op->opc = xor_opc;
26
op->args[2] = arg_new_constant(ctx, 1);
27
}
28
- return false;
29
+ return -1;
30
}
31
}
32
-
33
- return false;
34
+ return 0;
35
}
36
37
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
39
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
40
}
41
42
- if (fold_setcond_zmask(ctx, op, false)) {
43
+ i = fold_setcond_zmask(ctx, op, false);
44
+ if (i > 0) {
45
return true;
46
}
47
- fold_setcond_tst_pow2(ctx, op, false);
48
+ if (i == 0) {
49
+ fold_setcond_tst_pow2(ctx, op, false);
50
+ }
51
52
ctx->z_mask = 1;
53
return false;
54
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
55
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
56
}
57
58
- if (fold_setcond_zmask(ctx, op, true)) {
59
+ i = fold_setcond_zmask(ctx, op, true);
60
+ if (i > 0) {
61
return true;
62
}
63
- fold_setcond_tst_pow2(ctx, op, true);
64
+ if (i == 0) {
65
+ fold_setcond_tst_pow2(ctx, op, true);
66
+ }
67
68
/* Value is {0,-1} so all bits are repetitions of the sign. */
69
ctx->s_mask = -1;
28
--
70
--
29
2.34.1
71
2.43.0
30
31
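
For readers following the new tri-state convention (1 = finished, -1 = simplified, 0 = unchanged), a minimal sketch of the caller pattern; the helper names below are made up for illustration and do not exist in tcg/optimize.c:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical helper: 1 = fully folded, -1 = simplified, 0 = unchanged. */
    static int try_primary_fold(int x)
    {
        return x == 0 ? 1 : (x < 0 ? -1 : 0);
    }

    static bool fold_example(int x)
    {
        int i = try_primary_fold(x);
        if (i > 0) {
            return true;          /* op fully folded, caller is done */
        }
        if (i == 0) {
            /* only attempt the secondary transform if nothing changed */
            printf("op %d unchanged, trying secondary fold\n", x);
        }
        return false;             /* mask bookkeeping still required */
    }

    int main(void)
    {
        fold_example(0);
        fold_example(-3);
        fold_example(5);
        return 0;
    }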
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Avoid the use of the OptContext slots.
2
2
3
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Message-Id: <20230823145542.79633-9-philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
target/cris/translate.c | 20 +++++++++++---------
6
tcg/optimize.c | 3 +--
8
1 file changed, 11 insertions(+), 9 deletions(-)
7
1 file changed, 1 insertion(+), 2 deletions(-)
9
8
10
diff --git a/target/cris/translate.c b/target/cris/translate.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/target/cris/translate.c
11
--- a/tcg/optimize.c
13
+++ b/target/cris/translate.c
12
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static inline void t_gen_swapw(TCGv d, TCGv s)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
15
tcg_gen_or_tl(d, d, t);
14
fold_setcond_tst_pow2(ctx, op, false);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
16
}
20
}
17
21
18
-/* Reverse the within each byte.
22
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
19
- T0 = (((T0 << 7) & 0x80808080) |
20
- ((T0 << 5) & 0x40404040) |
21
- ((T0 << 3) & 0x20202020) |
22
- ((T0 << 1) & 0x10101010) |
23
- ((T0 >> 1) & 0x08080808) |
24
- ((T0 >> 3) & 0x04040404) |
25
- ((T0 >> 5) & 0x02020202) |
26
- ((T0 >> 7) & 0x01010101));
27
+/*
28
+ * Reverse the bits within each byte.
29
+ *
30
+ * T0 = ((T0 << 7) & 0x80808080)
31
+ * | ((T0 << 5) & 0x40404040)
32
+ * | ((T0 << 3) & 0x20202020)
33
+ * | ((T0 << 1) & 0x10101010)
34
+ * | ((T0 >> 1) & 0x08080808)
35
+ * | ((T0 >> 3) & 0x04040404)
36
+ * | ((T0 >> 5) & 0x02020202)
37
+ * | ((T0 >> 7) & 0x01010101);
38
*/
39
static void t_gen_swapr(TCGv d, TCGv s)
40
{
41
--
23
--
42
2.34.1
24
2.43.0
43
44
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Avoid the use of the OptContext slots.
2
2
3
Document wswap_i64(), added in commit 46be8425ff
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
("tcg: Implement tcg_gen_{h,w}swap_{i32,i64}").
5
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Message-Id: <20230823145542.79633-8-philmd@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
5
---
11
tcg/tcg-op.c | 5 +++++
6
tcg/optimize.c | 3 +--
12
1 file changed, 5 insertions(+)
7
1 file changed, 1 insertion(+), 2 deletions(-)
13
8
14
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/tcg-op.c
11
--- a/tcg/optimize.c
17
+++ b/tcg/tcg-op.c
12
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ void tcg_gen_hswap_i64(TCGv_i64 ret, TCGv_i64 arg)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
19
tcg_temp_free_i64(t1);
14
}
15
16
/* Value is {0,-1} so all bits are repetitions of the sign. */
17
- ctx->s_mask = -1;
18
- return false;
19
+ return fold_masks_s(ctx, op, -1);
20
}
20
}
21
21
22
+/*
22
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
23
+ * wswap_i64: Swap 32-bit words within a 64-bit value.
24
+ *
25
+ * Byte pattern: abcdefgh -> efghabcd
26
+ */
27
void tcg_gen_wswap_i64(TCGv_i64 ret, TCGv_i64 arg)
28
{
29
/* Swapping 2 32-bit elements is a rotate. */
30
--
23
--
31
2.34.1
24
2.43.0
32
33
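
The "swapping 2 32-bit elements is a rotate" remark is easy to confirm against the abcdefgh byte labels used in the new comment; a standalone sketch, illustration only:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    static uint64_t rotl64(uint64_t v, unsigned n)
    {
        return (v << n) | (v >> (64 - n));   /* n must be 1..63 here */
    }

    int main(void)
    {
        /* Label the bytes a..h as in the comment: abcdefgh -> efghabcd. */
        uint64_t v = 0x0a0b0c0d0e0f1011ULL;  /* a=0a b=0b ... h=11 */
        printf("in  = 0x%016" PRIx64 "\n", v);
        printf("out = 0x%016" PRIx64 "\n", rotl64(v, 32));
        return 0;
    }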
1
Use the carry bit to optimize some forms of setcond.
1
Avoid the use of the OptContext slots.
2
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
tcg/i386/tcg-target.c.inc | 50 +++++++++++++++++++++++++++++++++++++++
6
tcg/optimize.c | 3 +--
7
1 file changed, 50 insertions(+)
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
8
9
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/i386/tcg-target.c.inc
11
--- a/tcg/optimize.c
12
+++ b/tcg/i386/tcg-target.c.inc
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
14
TCGArg dest, TCGArg arg1, TCGArg arg2,
14
return fold_setcond(ctx, op);
15
int const_arg2)
15
}
16
{
16
17
+ bool inv = false;
17
- ctx->z_mask = 1;
18
+
18
- return false;
19
+ switch (cond) {
19
+ return fold_masks_z(ctx, op, 1);
20
+ case TCG_COND_NE:
20
21
+ inv = true;
21
do_setcond_const:
22
+ /* fall through */
22
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
23
+ case TCG_COND_EQ:
24
+ /* If arg2 is 0, convert to LTU/GEU vs 1. */
25
+ if (const_arg2 && arg2 == 0) {
26
+ arg2 = 1;
27
+ goto do_ltu;
28
+ }
29
+ break;
30
+
31
+ case TCG_COND_LEU:
32
+ inv = true;
33
+ /* fall through */
34
+ case TCG_COND_GTU:
35
+ /* If arg2 is a register, swap for LTU/GEU. */
36
+ if (!const_arg2) {
37
+ TCGReg t = arg1;
38
+ arg1 = arg2;
39
+ arg2 = t;
40
+ goto do_ltu;
41
+ }
42
+ break;
43
+
44
+ case TCG_COND_GEU:
45
+ inv = true;
46
+ /* fall through */
47
+ case TCG_COND_LTU:
48
+ do_ltu:
49
+ /*
50
+ * Relying on the carry bit, use SBB to produce -1 if LTU, 0 if GEU.
51
+ * We can then use NEG or INC to produce the desired result.
52
+ * This is always smaller than the SETCC expansion.
53
+ */
54
+ tcg_out_cmp(s, arg1, arg2, const_arg2, rexw);
55
+ tgen_arithr(s, ARITH_SBB, dest, dest); /* T:-1 F:0 */
56
+ if (inv) {
57
+ tgen_arithi(s, ARITH_ADD, dest, 1, 0); /* T:0 F:1 */
58
+ } else {
59
+ tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NEG, dest); /* T:1 F:0 */
60
+ }
61
+ return;
62
+
63
+ default:
64
+ break;
65
+ }
66
+
67
tcg_out_cmp(s, arg1, arg2, const_arg2, rexw);
68
tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
69
tcg_out_ext8u(s, dest, dest);
70
--
23
--
71
2.34.1
24
2.43.0
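
A standalone check of the SBB trick described above, using plain C arithmetic rather than generated x86 (illustration only): after cmp, the carry flag holds the unsigned less-than result, and sbb dest,dest materialises -CF, from which NEG or INC produce the 0/1 outcomes.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t pairs[][2] = { {1, 2}, {2, 2}, {3, 2} };
        for (int i = 0; i < 3; i++) {
            uint32_t a = pairs[i][0], b = pairs[i][1];
            /* cmp a, b sets CF = (a < b) unsigned;
             * sbb dest, dest then computes dest - dest - CF = -CF. */
            int32_t sbb = -(int32_t)(a < b);   /* LTU ? -1 : 0 */
            int32_t ltu = -sbb;                /* neg: LTU ? 1 : 0 */
            int32_t geu = sbb + 1;             /* inc: GEU ? 1 : 0 */
            printf("a=%u b=%u sbb=%2d ltu=%d geu=%d\n", a, b, sbb, ltu, geu);
        }
        return 0;
    }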
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
3
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-Id: <20230823145542.79633-6-philmd@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
3
---
8
tcg/tcg-op.c | 5 +++++
4
tcg/optimize.c | 2 +-
9
1 file changed, 5 insertions(+)
5
1 file changed, 1 insertion(+), 1 deletion(-)
10
6
11
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/tcg-op.c
9
--- a/tcg/optimize.c
14
+++ b/tcg/tcg-op.c
10
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
13
op->args[3] = tcg_swap_cond(op->args[3]);
16
}
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
17
}
18
18
19
+/*
19
static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
20
+ * bswap64_i64: 64-bit byte swap on a 64-bit value.
21
+ *
22
+ * Byte pattern: abcdefgh -> hgfedcba
23
+ */
24
void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
25
{
26
if (TCG_TARGET_REG_BITS == 32) {
27
--
20
--
28
2.34.1
21
2.43.0
29
30
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
3
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Message-Id: <20230823145542.79633-5-philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
3
---
7
tcg/tcg-op.c | 11 ++++++++++-
4
tcg/optimize.c | 2 +-
8
1 file changed, 10 insertions(+), 1 deletion(-)
5
1 file changed, 1 insertion(+), 1 deletion(-)
9
6
10
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/tcg-op.c
9
--- a/tcg/optimize.c
13
+++ b/tcg/tcg-op.c
10
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
13
op->args[5] = tcg_invert_cond(op->args[5]);
15
}
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
16
}
17
}
17
18
18
+/*
19
static bool fold_sextract(OptContext *ctx, TCGOp *op)
19
+ * bswap32_i64: 32-bit byte swap on the low bits of a 64-bit value.
20
+ *
21
+ * Byte pattern: xxxxabcd -> yyyydcba
22
+ *
23
+ * With TCG_BSWAP_IZ, x == zero, else undefined.
24
+ * With TCG_BSWAP_OZ, y == zero, with TCG_BSWAP_OS y == sign, else undefined.
25
+ */
26
void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
27
{
28
/* Only one extension flag may be present. */
29
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags)
30
} else {
31
tcg_gen_shri_i64(t1, t1, 32); /* t1 = ....dc.. */
32
}
33
- tcg_gen_or_i64(ret, t0, t1); /* ret = ssssdcba */
34
+ tcg_gen_or_i64(ret, t0, t1); /* ret = ssssdcba (OS) */
35
+ /* ....dcba (else) */
36
37
tcg_temp_free_i64(t0);
38
tcg_temp_free_i64(t1);
39
--
20
--
40
2.34.1
21
2.43.0
41
42
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
2
3
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Message-Id: <20230823145542.79633-4-philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/tcg-op.c | 5 +++++
6
tcg/optimize.c | 24 +++++++++---------------
8
1 file changed, 5 insertions(+)
7
1 file changed, 9 insertions(+), 15 deletions(-)
9
8
10
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/tcg-op.c
11
--- a/tcg/optimize.c
13
+++ b/tcg/tcg-op.c
12
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
14
static bool fold_sextract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask, s_mask, s_mask_old;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = sextract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ sextract64(ti_const_val(t1), pos, len));
15
}
30
}
31
32
- z_mask = arg_info(op->args[1])->z_mask;
33
- z_mask = sextract64(z_mask, pos, len);
34
- ctx->z_mask = z_mask;
35
-
36
- s_mask_old = arg_info(op->args[1])->s_mask;
37
- s_mask = sextract64(s_mask_old, pos, len);
38
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
39
- ctx->s_mask = s_mask;
40
+ s_mask_old = t1->s_mask;
41
+ s_mask = s_mask_old >> pos;
42
+ s_mask |= -1ull << (len - 1);
43
44
if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
45
return true;
46
}
47
48
- return fold_masks(ctx, op);
49
+ z_mask = sextract64(t1->z_mask, pos, len);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
16
}
51
}
17
52
18
+/*
53
static bool fold_shift(OptContext *ctx, TCGOp *op)
19
+ * bswap32_i32: 32-bit byte swap on a 32-bit value.
20
+ *
21
+ * Byte pattern: abcd -> dcba
22
+ */
23
void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
24
{
25
if (TCG_TARGET_HAS_bswap32_i32) {
26
--
54
--
27
2.34.1
55
2.43.0
28
29
1
For LT/GE vs zero, shift down the sign bit.
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
tcg/i386/tcg-target.c.inc | 15 +++++++++++++++
6
tcg/optimize.c | 27 ++++++++++++++-------------
7
1 file changed, 15 insertions(+)
7
1 file changed, 14 insertions(+), 13 deletions(-)
8
8
9
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/i386/tcg-target.c.inc
11
--- a/tcg/optimize.c
12
+++ b/tcg/i386/tcg-target.c.inc
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
14
static bool fold_shift(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t s_mask, z_mask, sign;
17
+ TempOptInfo *t1, *t2;
18
19
if (fold_const2(ctx, op) ||
20
fold_ix_to_i(ctx, op, 0) ||
21
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
22
return true;
23
}
24
25
- s_mask = arg_info(op->args[1])->s_mask;
26
- z_mask = arg_info(op->args[1])->z_mask;
27
+ t1 = arg_info(op->args[1]);
28
+ t2 = arg_info(op->args[2]);
29
+ s_mask = t1->s_mask;
30
+ z_mask = t1->z_mask;
31
32
- if (arg_is_const(op->args[2])) {
33
- int sh = arg_info(op->args[2])->val;
34
-
35
- ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
36
+ if (ti_is_const(t2)) {
37
+ int sh = ti_const_val(t2);
38
39
+ z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
40
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
41
42
- return fold_masks(ctx, op);
43
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
44
}
45
46
switch (op->opc) {
47
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
48
* Arithmetic right shift will not reduce the number of
49
* input sign repetitions.
50
*/
51
- ctx->s_mask = s_mask;
52
- break;
53
+ return fold_masks_s(ctx, op, s_mask);
54
CASE_OP_32_64(shr):
55
/*
56
* If the sign bit is known zero, then logical right shift
57
- * will not reduced the number of input sign repetitions.
58
+ * will not reduce the number of input sign repetitions.
59
*/
60
- sign = (s_mask & -s_mask) >> 1;
61
+ sign = -s_mask;
62
if (sign && !(z_mask & sign)) {
63
- ctx->s_mask = s_mask;
64
+ return fold_masks_s(ctx, op, s_mask);
14
}
65
}
15
return;
66
break;
16
17
+ case TCG_COND_GE:
18
+ inv = true;
19
+ /* fall through */
20
+ case TCG_COND_LT:
21
+ /* If arg2 is 0, extract the sign bit. */
22
+ if (const_arg2 && arg2 == 0) {
23
+ tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, dest, arg1);
24
+ if (inv) {
25
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, dest);
26
+ }
27
+ tcg_out_shifti(s, SHIFT_SHR + rexw, dest, rexw ? 63 : 31);
28
+ return;
29
+ }
30
+ break;
31
+
32
default:
67
default:
33
break;
68
break;
34
}
69
}
70
71
- return false;
72
+ return finish_folding(ctx, op);
73
}
74
75
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
35
--
76
--
36
2.34.1
77
2.43.0
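
The sign-bit-shift expansion described above amounts to the following identities, shown here for the 32-bit case as a standalone sketch (illustration only): LT vs zero is a logical shift of the sign bit, and GE vs zero inverts first and then shifts.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int32_t samples[] = { -5, 0, 7 };
        for (int i = 0; i < 3; i++) {
            int32_t x = samples[i];
            uint32_t lt = (uint32_t)x >> 31;    /* setcond(LT, x, 0) */
            uint32_t ge = (uint32_t)~x >> 31;   /* setcond(GE, x, 0) */
            printf("x=%3d lt0=%u ge0=%u\n", x, lt, ge);
        }
        return 0;
    }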
1
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
1
Merge the two conditions, sign != 0 && !(z_mask & sign),
2
by testing ~z_mask & sign. If sign == 0, the logical and
3
will produce false.
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
7
---
4
tcg/i386/tcg-target.h | 4 ++--
8
tcg/optimize.c | 5 ++---
5
tcg/i386/tcg-target.c.inc | 32 ++++++++++++++++++++++++--------
9
1 file changed, 2 insertions(+), 3 deletions(-)
6
2 files changed, 26 insertions(+), 10 deletions(-)
7
10
8
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/i386/tcg-target.h
13
--- a/tcg/optimize.c
11
+++ b/tcg/i386/tcg-target.h
14
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ typedef enum {
15
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
13
#define TCG_TARGET_HAS_sextract_i32 1
16
14
#define TCG_TARGET_HAS_extract2_i32 1
17
static bool fold_shift(OptContext *ctx, TCGOp *op)
15
#define TCG_TARGET_HAS_movcond_i32 1
16
-#define TCG_TARGET_HAS_negsetcond_i32 0
17
+#define TCG_TARGET_HAS_negsetcond_i32 1
18
#define TCG_TARGET_HAS_add2_i32 1
19
#define TCG_TARGET_HAS_sub2_i32 1
20
#define TCG_TARGET_HAS_mulu2_i32 1
21
@@ -XXX,XX +XXX,XX @@ typedef enum {
22
#define TCG_TARGET_HAS_sextract_i64 0
23
#define TCG_TARGET_HAS_extract2_i64 1
24
#define TCG_TARGET_HAS_movcond_i64 1
25
-#define TCG_TARGET_HAS_negsetcond_i64 0
26
+#define TCG_TARGET_HAS_negsetcond_i64 1
27
#define TCG_TARGET_HAS_add2_i64 1
28
#define TCG_TARGET_HAS_sub2_i64 1
29
#define TCG_TARGET_HAS_mulu2_i64 1
30
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
31
index XXXXXXX..XXXXXXX 100644
32
--- a/tcg/i386/tcg-target.c.inc
33
+++ b/tcg/i386/tcg-target.c.inc
34
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
35
36
static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
37
TCGArg dest, TCGArg arg1, TCGArg arg2,
38
- int const_arg2)
39
+ int const_arg2, bool neg)
40
{
18
{
41
bool inv = false;
19
- uint64_t s_mask, z_mask, sign;
42
bool cleared;
20
+ uint64_t s_mask, z_mask;
43
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
21
TempOptInfo *t1, *t2;
44
* This is always smaller than the SETCC expansion.
22
23
if (fold_const2(ctx, op) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
25
* If the sign bit is known zero, then logical right shift
26
* will not reduce the number of input sign repetitions.
45
*/
27
*/
46
tcg_out_cmp(s, arg1, arg2, const_arg2, rexw);
28
- sign = -s_mask;
47
- tgen_arithr(s, ARITH_SBB, dest, dest); /* T:-1 F:0 */
29
- if (sign && !(z_mask & sign)) {
48
- if (inv) {
30
+ if (~z_mask & -s_mask) {
49
- tgen_arithi(s, ARITH_ADD, dest, 1, 0); /* T:0 F:1 */
31
return fold_masks_s(ctx, op, s_mask);
50
- } else {
51
- tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NEG, dest); /* T:1 F:0 */
52
+
53
+ /* X - X - C = -C = (C ? -1 : 0) */
54
+ tgen_arithr(s, ARITH_SBB + (neg ? rexw : 0), dest, dest);
55
+ if (inv && neg) {
56
+ /* ~(C ? -1 : 0) = (C ? 0 : -1) */
57
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, dest);
58
+ } else if (inv) {
59
+ /* (C ? -1 : 0) + 1 = (C ? 0 : 1) */
60
+ tgen_arithi(s, ARITH_ADD, dest, 1, 0);
61
+ } else if (!neg) {
62
+ /* -(C ? -1 : 0) = (C ? 1 : 0) */
63
+ tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_NEG, dest);
64
}
65
return;
66
67
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
68
if (inv) {
69
tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, dest);
70
}
71
- tcg_out_shifti(s, SHIFT_SHR + rexw, dest, rexw ? 63 : 31);
72
+ tcg_out_shifti(s, (neg ? SHIFT_SAR : SHIFT_SHR) + rexw,
73
+ dest, rexw ? 63 : 31);
74
return;
75
}
32
}
76
break;
33
break;
77
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
78
if (!cleared) {
79
tcg_out_ext8u(s, dest, dest);
80
}
81
+ if (neg) {
82
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, dest);
83
+ }
84
}
85
86
#if TCG_TARGET_REG_BITS == 32
87
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
88
arg_label(args[3]), 0);
89
break;
90
OP_32_64(setcond):
91
- tcg_out_setcond(s, rexw, args[3], a0, a1, a2, const_a2);
92
+ tcg_out_setcond(s, rexw, args[3], a0, a1, a2, const_a2, false);
93
+ break;
94
+ OP_32_64(negsetcond):
95
+ tcg_out_setcond(s, rexw, args[3], a0, a1, a2, const_a2, true);
96
break;
97
OP_32_64(movcond):
98
tcg_out_movcond(s, rexw, args[5], a0, a1, a2, const_a2, args[3]);
99
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
100
101
case INDEX_op_setcond_i32:
102
case INDEX_op_setcond_i64:
103
+ case INDEX_op_negsetcond_i32:
104
+ case INDEX_op_negsetcond_i64:
105
return C_O1_I2(q, r, re);
106
107
case INDEX_op_movcond_i32:
108
--
34
--
109
2.34.1
35
2.43.0
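
Complementing the setcond sketch a few patches back, the SAR variant selected here for negsetcond yields 0/-1 directly instead of 0/1. A small check, illustration only; it relies on the usual arithmetic behaviour of signed right shift, which C leaves implementation-defined:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int32_t samples[] = { -7, 0, 9 };
        for (int i = 0; i < 3; i++) {
            int32_t x = samples[i];
            uint32_t setcond_lt0    = (uint32_t)x >> 31;  /* SHR: 0 or 1  */
            int32_t  negsetcond_lt0 = x >> 31;            /* SAR: 0 or -1 (assumes
                                                             arithmetic shift) */
            printf("x=%3d setcond=%u negsetcond=%d\n",
                   x, setcond_lt0, negsetcond_lt0);
        }
        return 0;
    }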
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Duplicate fold_sub_vec into fold_sub instead of calling it,
2
now that fold_sub_vec always returns true.
2
3
3
Document hswap_i32() and hswap_i64(), added in commit
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
46be8425ff ("tcg: Implement tcg_gen_{h,w}swap_{i32,i64}").
5
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Message-Id: <20230823145542.79633-7-philmd@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
6
---
11
tcg/tcg-op.c | 25 ++++++++++++++++++-------
7
tcg/optimize.c | 9 ++++++---
12
1 file changed, 18 insertions(+), 7 deletions(-)
8
1 file changed, 6 insertions(+), 3 deletions(-)
13
9
14
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/tcg-op.c
12
--- a/tcg/optimize.c
17
+++ b/tcg/tcg-op.c
13
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
15
fold_sub_to_neg(ctx, op)) {
16
return true;
19
}
17
}
18
- return false;
19
+ return finish_folding(ctx, op);
20
}
20
}
21
21
22
+/*
22
static bool fold_sub(OptContext *ctx, TCGOp *op)
23
+ * hswap_i32: Swap 16-bit halfwords within a 32-bit value.
24
+ *
25
+ * Byte pattern: abcd -> cdab
26
+ */
27
void tcg_gen_hswap_i32(TCGv_i32 ret, TCGv_i32 arg)
28
{
23
{
29
/* Swapping 2 16-bit elements is a rotate. */
24
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
30
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg)
25
+ if (fold_const2(ctx, op) ||
26
+ fold_xx_to_i(ctx, op, 0) ||
27
+ fold_xi_to_x(ctx, op, 0) ||
28
+ fold_sub_to_neg(ctx, op)) {
29
return true;
31
}
30
}
31
32
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
33
? INDEX_op_add_i32 : INDEX_op_add_i64);
34
op->args[2] = arg_new_constant(ctx, -val);
35
}
36
- return false;
37
+ return finish_folding(ctx, op);
32
}
38
}
33
39
34
+/*
40
static bool fold_sub2(OptContext *ctx, TCGOp *op)
35
+ * hswap_i64: Swap 16-bit halfwords within a 64-bit value.
36
+ * See also include/qemu/bitops.h, hswap64.
37
+ *
38
+ * Byte pattern: abcdefgh -> ghefcdab
39
+ */
40
void tcg_gen_hswap_i64(TCGv_i64 ret, TCGv_i64 arg)
41
{
42
uint64_t m = 0x0000ffff0000ffffull;
43
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
44
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
45
46
- /* See include/qemu/bitops.h, hswap64. */
47
- tcg_gen_rotli_i64(t1, arg, 32);
48
- tcg_gen_andi_i64(t0, t1, m);
49
- tcg_gen_shli_i64(t0, t0, 16);
50
- tcg_gen_shri_i64(t1, t1, 16);
51
- tcg_gen_andi_i64(t1, t1, m);
52
- tcg_gen_or_i64(ret, t0, t1);
53
+ /* arg = abcdefgh */
54
+ tcg_gen_rotli_i64(t1, arg, 32); /* t1 = efghabcd */
55
+ tcg_gen_andi_i64(t0, t1, m); /* t0 = ..gh..cd */
56
+ tcg_gen_shli_i64(t0, t0, 16); /* t0 = gh..cd.. */
57
+ tcg_gen_shri_i64(t1, t1, 16); /* t1 = ..efghab */
58
+ tcg_gen_andi_i64(t1, t1, m); /* t1 = ..ef..ab */
59
+ tcg_gen_or_i64(ret, t0, t1); /* ret = ghefcdab */
60
61
tcg_temp_free_i64(t0);
62
tcg_temp_free_i64(t1);
63
--
41
--
64
2.34.1
42
2.43.0
65
66
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Avoid the use of the OptContext slots.
2
2
3
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Message-Id: <20230823145542.79633-2-philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/tcg-op.c | 27 +++++++++++++++++++--------
6
tcg/optimize.c | 16 +++++++++-------
8
1 file changed, 19 insertions(+), 8 deletions(-)
7
1 file changed, 9 insertions(+), 7 deletions(-)
9
8
10
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/tcg-op.c
11
--- a/tcg/optimize.c
13
+++ b/tcg/tcg-op.c
12
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask = -1, s_mask = 0;
18
+
19
/* We can't do any folding with a load, but we can record bits. */
20
switch (op->opc) {
21
CASE_OP_32_64(ld8s):
22
- ctx->s_mask = MAKE_64BIT_MASK(8, 56);
23
+ s_mask = INT8_MIN;
24
break;
25
CASE_OP_32_64(ld8u):
26
- ctx->z_mask = MAKE_64BIT_MASK(0, 8);
27
+ z_mask = MAKE_64BIT_MASK(0, 8);
28
break;
29
CASE_OP_32_64(ld16s):
30
- ctx->s_mask = MAKE_64BIT_MASK(16, 48);
31
+ s_mask = INT16_MIN;
32
break;
33
CASE_OP_32_64(ld16u):
34
- ctx->z_mask = MAKE_64BIT_MASK(0, 16);
35
+ z_mask = MAKE_64BIT_MASK(0, 16);
36
break;
37
case INDEX_op_ld32s_i64:
38
- ctx->s_mask = MAKE_64BIT_MASK(32, 32);
39
+ s_mask = INT32_MIN;
40
break;
41
case INDEX_op_ld32u_i64:
42
- ctx->z_mask = MAKE_64BIT_MASK(0, 32);
43
+ z_mask = MAKE_64BIT_MASK(0, 32);
44
break;
45
default:
46
g_assert_not_reached();
15
}
47
}
48
- return false;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
16
}
50
}
17
51
18
+/*
52
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
19
+ * bswap16_i32: 16-bit byte swap on the low bits of a 32-bit value.
20
+ *
21
+ * Byte pattern: xxab -> yyba
22
+ *
23
+ * With TCG_BSWAP_IZ, x == zero, else undefined.
24
+ * With TCG_BSWAP_OZ, y == zero, with TCG_BSWAP_OS y == sign, else undefined.
25
+ */
26
void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags)
27
{
28
/* Only one extension flag may be present. */
29
@@ -XXX,XX +XXX,XX @@ void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags)
30
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
31
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
32
33
- tcg_gen_shri_i32(t0, arg, 8);
34
+ /* arg = ..ab (IZ) xxab (!IZ) */
35
+ tcg_gen_shri_i32(t0, arg, 8); /* t0 = ...a (IZ) .xxa (!IZ) */
36
if (!(flags & TCG_BSWAP_IZ)) {
37
- tcg_gen_ext8u_i32(t0, t0);
38
+ tcg_gen_ext8u_i32(t0, t0); /* t0 = ...a */
39
}
40
41
if (flags & TCG_BSWAP_OS) {
42
- tcg_gen_shli_i32(t1, arg, 24);
43
- tcg_gen_sari_i32(t1, t1, 16);
44
+ tcg_gen_shli_i32(t1, arg, 24); /* t1 = b... */
45
+ tcg_gen_sari_i32(t1, t1, 16); /* t1 = ssb. */
46
} else if (flags & TCG_BSWAP_OZ) {
47
- tcg_gen_ext8u_i32(t1, arg);
48
- tcg_gen_shli_i32(t1, t1, 8);
49
+ tcg_gen_ext8u_i32(t1, arg); /* t1 = ...b */
50
+ tcg_gen_shli_i32(t1, t1, 8); /* t1 = ..b. */
51
} else {
52
- tcg_gen_shli_i32(t1, arg, 8);
53
+ tcg_gen_shli_i32(t1, arg, 8); /* t1 = xab. */
54
}
55
56
- tcg_gen_or_i32(ret, t0, t1);
57
+ tcg_gen_or_i32(ret, t0, t1); /* ret = ..ba (OZ) */
58
+ /* = ssba (OS) */
59
+ /* = xaba (no flag) */
60
tcg_temp_free_i32(t0);
61
tcg_temp_free_i32(t1);
62
}
63
--
53
--
64
2.34.1
54
2.43.0
65
66
diff view generated by jsdifflib
1
Trivial, as we simply need to load a different constant
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
in the conditional move.
3
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
3
---
7
tcg/arm/tcg-target.h | 2 +-
4
tcg/optimize.c | 2 +-
8
tcg/arm/tcg-target.c.inc | 9 +++++++++
5
1 file changed, 1 insertion(+), 1 deletion(-)
9
2 files changed, 10 insertions(+), 1 deletion(-)
10
6
11
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/arm/tcg-target.h
9
--- a/tcg/optimize.c
14
+++ b/tcg/arm/tcg-target.h
10
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
11
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
16
#define TCG_TARGET_HAS_sextract_i32 use_armv7_instructions
12
TCGType type;
17
#define TCG_TARGET_HAS_extract2_i32 1
13
18
#define TCG_TARGET_HAS_movcond_i32 1
14
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
19
-#define TCG_TARGET_HAS_negsetcond_i32 0
15
- return false;
20
+#define TCG_TARGET_HAS_negsetcond_i32 1
16
+ return finish_folding(ctx, op);
21
#define TCG_TARGET_HAS_mulu2_i32 1
17
}
22
#define TCG_TARGET_HAS_muls2_i32 1
18
23
#define TCG_TARGET_HAS_muluh_i32 0
19
type = ctx->type;
24
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
25
index XXXXXXX..XXXXXXX 100644
26
--- a/tcg/arm/tcg-target.c.inc
27
+++ b/tcg/arm/tcg-target.c.inc
28
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
29
tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
30
ARITH_MOV, args[0], 0, 0);
31
break;
32
+ case INDEX_op_negsetcond_i32:
33
+ tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
34
+ args[1], args[2], const_args[2]);
35
+ tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
36
+ ARITH_MVN, args[0], 0, 0);
37
+ tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
38
+ ARITH_MOV, args[0], 0, 0);
39
+ break;
40
41
case INDEX_op_brcond2_i32:
42
c = tcg_out_cmp2(s, args, const_args);
43
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
44
case INDEX_op_add_i32:
45
case INDEX_op_sub_i32:
46
case INDEX_op_setcond_i32:
47
+ case INDEX_op_negsetcond_i32:
48
return C_O1_I2(r, r, rIN);
49
50
case INDEX_op_and_i32:
51
--
20
--
52
2.34.1
21
2.43.0
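
As an aside, the INT8_MIN/INT16_MIN/INT32_MIN constants adopted here, viewed as 64-bit patterns, cover the sign bit and everything above it, i.e. the old MAKE_64BIT_MASK ranges extended down by one bit. A standalone check (macro copied locally, assumed to match include/qemu/bitops.h):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    #define MAKE_64BIT_MASK(shift, length) \
        (((~0ULL) >> (64 - (length))) << (shift))

    int main(void)
    {
        struct { int width; int64_t repr; } t[] = {
            { 8, INT8_MIN }, { 16, INT16_MIN }, { 32, INT32_MIN },
        };
        for (int i = 0; i < 3; i++) {
            /* Bits width-1..63: the sign bit and all of its repetitions. */
            uint64_t mask = MAKE_64BIT_MASK(t[i].width - 1,
                                            64 - (t[i].width - 1));
            printf("width=%2d INT_MIN=0x%016" PRIx64 " mask=0x%016" PRIx64 " %s\n",
                   t[i].width, (uint64_t)t[i].repr, mask,
                   (uint64_t)t[i].repr == mask ? "equal" : "differ");
        }
        return 0;
    }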
1
Pass a rexw parameter instead of duplicating the functions.
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Remove fold_masks as the function becomes unused.
2
3
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
tcg/i386/tcg-target.c.inc | 24 +++++++-----------------
7
tcg/optimize.c | 18 ++++++++----------
8
1 file changed, 7 insertions(+), 17 deletions(-)
8
1 file changed, 8 insertions(+), 10 deletions(-)
9
9
10
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/i386/tcg-target.c.inc
12
--- a/tcg/optimize.c
13
+++ b/tcg/i386/tcg-target.c.inc
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
15
return fold_masks_zs(ctx, op, -1, s_mask);
15
}
16
}
16
#endif
17
17
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
18
-static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest,
19
-{
19
- TCGArg arg1, TCGArg arg2, int const_arg2)
20
- return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
20
+static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
21
-}
21
+ TCGArg dest, TCGArg arg1, TCGArg arg2,
22
-
22
+ int const_arg2)
23
/*
24
* An "affected" mask bit is 0 if and only if the result is identical
25
* to the first input. Thus if the entire mask is 0, the operation
26
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
27
28
static bool fold_xor(OptContext *ctx, TCGOp *op)
23
{
29
{
24
- tcg_out_cmp(s, arg1, arg2, const_arg2, 0);
30
+ uint64_t z_mask, s_mask;
25
+ tcg_out_cmp(s, arg1, arg2, const_arg2, rexw);
31
+ TempOptInfo *t1, *t2;
26
tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
32
+
27
tcg_out_ext8u(s, dest, dest);
33
if (fold_const2_commutative(ctx, op) ||
34
fold_xx_to_i(ctx, op, 0) ||
35
fold_xi_to_x(ctx, op, 0) ||
36
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
37
return true;
38
}
39
40
- ctx->z_mask = arg_info(op->args[1])->z_mask
41
- | arg_info(op->args[2])->z_mask;
42
- ctx->s_mask = arg_info(op->args[1])->s_mask
43
- & arg_info(op->args[2])->s_mask;
44
- return fold_masks(ctx, op);
45
+ t1 = arg_info(op->args[1]);
46
+ t2 = arg_info(op->args[2]);
47
+ z_mask = t1->z_mask | t2->z_mask;
48
+ s_mask = t1->s_mask & t2->s_mask;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
28
}
50
}
29
51
30
-#if TCG_TARGET_REG_BITS == 64
52
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
31
-static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest,
32
- TCGArg arg1, TCGArg arg2, int const_arg2)
33
-{
34
- tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW);
35
- tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
36
- tcg_out_ext8u(s, dest, dest);
37
-}
38
-#else
39
+#if TCG_TARGET_REG_BITS == 32
40
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
41
const int *const_args)
42
{
43
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
44
tcg_out_brcond(s, rexw, a2, a0, a1, const_args[1],
45
arg_label(args[3]), 0);
46
break;
47
- case INDEX_op_setcond_i32:
48
- tcg_out_setcond32(s, args[3], a0, a1, a2, const_a2);
49
+ OP_32_64(setcond):
50
+ tcg_out_setcond(s, rexw, args[3], a0, a1, a2, const_a2);
51
break;
52
case INDEX_op_movcond_i32:
53
tcg_out_movcond32(s, args[5], a0, a1, a2, const_a2, args[3]);
54
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
55
}
56
break;
57
58
- case INDEX_op_setcond_i64:
59
- tcg_out_setcond64(s, args[3], a0, a1, a2, const_a2);
60
- break;
61
case INDEX_op_movcond_i64:
62
tcg_out_movcond64(s, args[5], a0, a1, a2, const_a2, args[3]);
63
break;
64
--
53
--
65
2.34.1
54
2.43.0
66
67
1
From: Anton Johansson via <qemu-devel@nongnu.org>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
3
Signed-off-by: Anton Johansson <anjo@rev.ng>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-Id: <20230807155706.9580-9-anjo@rev.ng>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
3
---
8
accel/tcg/cputlb.c | 2 +-
4
tcg/optimize.c | 2 +-
9
1 file changed, 1 insertion(+), 1 deletion(-)
5
1 file changed, 1 insertion(+), 1 deletion(-)
10
6
11
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/cputlb.c
9
--- a/tcg/optimize.c
14
+++ b/accel/tcg/cputlb.c
10
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
11
@@ -XXX,XX +XXX,XX @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
12
return fold_orc(ctx, op);
13
}
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
16
}
17
}
17
18
18
static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
19
/* Propagate constants and copies, fold constant expressions. */
19
- target_ulong address, int flags,
20
+ vaddr address, int flags,
21
MMUAccessType access_type, bool enable)
22
{
23
if (enable) {
24
--
20
--
25
2.34.1
21
2.43.0
1
Trivial, as aarch64 has an instruction for this: CSETM.
1
All non-default cases now finish folding within each function.
2
Do the same with the default case and assert it is done after.
2
3
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
tcg/aarch64/tcg-target.h | 4 ++--
7
tcg/optimize.c | 6 ++----
7
tcg/aarch64/tcg-target.c.inc | 12 ++++++++++++
8
1 file changed, 2 insertions(+), 4 deletions(-)
8
2 files changed, 14 insertions(+), 2 deletions(-)
9
9
10
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/aarch64/tcg-target.h
12
--- a/tcg/optimize.c
13
+++ b/tcg/aarch64/tcg-target.h
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ typedef enum {
14
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
15
#define TCG_TARGET_HAS_sextract_i32 1
15
done = true;
16
#define TCG_TARGET_HAS_extract2_i32 1
16
break;
17
#define TCG_TARGET_HAS_movcond_i32 1
17
default:
18
-#define TCG_TARGET_HAS_negsetcond_i32 0
18
+ done = finish_folding(&ctx, op);
19
+#define TCG_TARGET_HAS_negsetcond_i32 1
19
break;
20
#define TCG_TARGET_HAS_add2_i32 1
20
}
21
#define TCG_TARGET_HAS_sub2_i32 1
21
-
22
#define TCG_TARGET_HAS_mulu2_i32 0
22
- if (!done) {
23
@@ -XXX,XX +XXX,XX @@ typedef enum {
23
- finish_folding(&ctx, op);
24
#define TCG_TARGET_HAS_sextract_i64 1
24
- }
25
#define TCG_TARGET_HAS_extract2_i64 1
25
+ tcg_debug_assert(done);
26
#define TCG_TARGET_HAS_movcond_i64 1
26
}
27
-#define TCG_TARGET_HAS_negsetcond_i64 0
27
}
28
+#define TCG_TARGET_HAS_negsetcond_i64 1
29
#define TCG_TARGET_HAS_add2_i64 1
30
#define TCG_TARGET_HAS_sub2_i64 1
31
#define TCG_TARGET_HAS_mulu2_i64 0
32
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
33
index XXXXXXX..XXXXXXX 100644
34
--- a/tcg/aarch64/tcg-target.c.inc
35
+++ b/tcg/aarch64/tcg-target.c.inc
36
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
37
TCG_REG_XZR, tcg_invert_cond(args[3]));
38
break;
39
40
+ case INDEX_op_negsetcond_i32:
41
+ a2 = (int32_t)a2;
42
+ /* FALLTHRU */
43
+ case INDEX_op_negsetcond_i64:
44
+ tcg_out_cmp(s, ext, a1, a2, c2);
45
+ /* Use CSETM alias of CSINV Wd, WZR, WZR, invert(cond). */
46
+ tcg_out_insn(s, 3506, CSINV, ext, a0, TCG_REG_XZR,
47
+ TCG_REG_XZR, tcg_invert_cond(args[3]));
48
+ break;
49
+
50
case INDEX_op_movcond_i32:
51
a2 = (int32_t)a2;
52
/* FALLTHRU */
53
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
54
case INDEX_op_sub_i64:
55
case INDEX_op_setcond_i32:
56
case INDEX_op_setcond_i64:
57
+ case INDEX_op_negsetcond_i32:
58
+ case INDEX_op_negsetcond_i64:
59
return C_O1_I2(r, r, rA);
60
61
case INDEX_op_mul_i32:
62
--
28
--
63
2.34.1
29
2.43.0
1
Tested-by: Nicholas Piggin <npiggin@gmail.com>
1
All mask setting is now done with parameters via fold_masks_*.
2
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
2
3
Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
target/ppc/translate/fixedpoint-impl.c.inc | 6 ++++--
6
tcg/optimize.c | 13 -------------
7
target/ppc/translate/vmx-impl.c.inc | 8 +++-----
7
1 file changed, 13 deletions(-)
8
2 files changed, 7 insertions(+), 7 deletions(-)
9
8
10
diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/target/ppc/translate/fixedpoint-impl.c.inc
11
--- a/tcg/optimize.c
13
+++ b/target/ppc/translate/fixedpoint-impl.c.inc
12
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool do_set_bool_cond(DisasContext *ctx, arg_X_bi *a, bool neg, bool rev)
13
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
15
uint32_t mask = 0x08 >> (a->bi & 0x03);
14
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
16
TCGCond cond = rev ? TCG_COND_EQ : TCG_COND_NE;
15
17
TCGv temp = tcg_temp_new();
16
/* In flight values from optimization. */
18
+ TCGv zero = tcg_constant_tl(0);
17
- uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
19
18
- uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
20
tcg_gen_extu_i32_tl(temp, cpu_crf[a->bi >> 2]);
19
TCGType type;
21
tcg_gen_andi_tl(temp, temp, mask);
20
} OptContext;
22
- tcg_gen_setcondi_tl(cond, cpu_gpr[a->rt], temp, 0);
21
23
if (neg) {
22
@@ -XXX,XX +XXX,XX @@ static bool finish_folding(OptContext *ctx, TCGOp *op)
24
- tcg_gen_neg_tl(cpu_gpr[a->rt], cpu_gpr[a->rt]);
23
for (i = 0; i < nb_oargs; i++) {
25
+ tcg_gen_negsetcond_tl(cond, cpu_gpr[a->rt], temp, zero);
24
TCGTemp *ts = arg_temp(op->args[i]);
26
+ } else {
25
reset_ts(ctx, ts);
27
+ tcg_gen_setcond_tl(cond, cpu_gpr[a->rt], temp, zero);
26
- /*
27
- * Save the corresponding known-zero/sign bits mask for the
28
- * first output argument (only one supported so far).
29
- */
30
- if (i == 0) {
31
- ts_info(ts)->z_mask = ctx->z_mask;
32
- }
28
}
33
}
29
return true;
34
return true;
30
}
35
}
31
diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
36
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
32
index XXXXXXX..XXXXXXX 100644
37
ctx.type = TCG_TYPE_I32;
33
--- a/target/ppc/translate/vmx-impl.c.inc
38
}
34
+++ b/target/ppc/translate/vmx-impl.c.inc
39
35
@@ -XXX,XX +XXX,XX @@ static bool trans_VCMPEQUQ(DisasContext *ctx, arg_VC *a)
40
- /* Assume all bits affected, no bits known zero, no sign reps. */
36
tcg_gen_xor_i64(t1, t0, t1);
41
- ctx.z_mask = -1;
37
42
- ctx.s_mask = 0;
38
tcg_gen_or_i64(t1, t1, t2);
43
-
39
- tcg_gen_setcondi_i64(TCG_COND_EQ, t1, t1, 0);
44
/*
40
- tcg_gen_neg_i64(t1, t1);
45
* Process each opcode.
41
+ tcg_gen_negsetcond_i64(TCG_COND_EQ, t1, t1, tcg_constant_i64(0));
46
* Sorted alphabetically by opcode as much as possible.
42
43
set_avr64(a->vrt, t1, true);
44
set_avr64(a->vrt, t1, false);
45
@@ -XXX,XX +XXX,XX @@ static bool do_vcmpgtq(DisasContext *ctx, arg_VC *a, bool sign)
46
47
get_avr64(t0, a->vra, false);
48
get_avr64(t1, a->vrb, false);
49
- tcg_gen_setcond_i64(TCG_COND_GTU, t2, t0, t1);
50
+ tcg_gen_negsetcond_i64(TCG_COND_GTU, t2, t0, t1);
51
52
get_avr64(t0, a->vra, true);
53
get_avr64(t1, a->vrb, true);
54
tcg_gen_movcond_i64(TCG_COND_EQ, t2, t0, t1, t2, tcg_constant_i64(0));
55
- tcg_gen_setcond_i64(sign ? TCG_COND_GT : TCG_COND_GTU, t1, t0, t1);
56
+ tcg_gen_negsetcond_i64(sign ? TCG_COND_GT : TCG_COND_GTU, t1, t0, t1);
57
58
tcg_gen_or_i64(t1, t1, t2);
59
- tcg_gen_neg_i64(t1, t1);
60
61
set_avr64(a->vrt, t1, true);
62
set_avr64(a->vrt, t1, false);
63
--
47
--
64
2.34.1
48
2.43.0
1
From: Anton Johansson via <qemu-devel@nongnu.org>
1
All instances of s_mask have been converted to the new
2
representation. We can now re-enable usage.
2
3
3
In system mode, abi_ptr is primarily used for representing addresses
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
when accessing guest memory with cpu_[st|ld]*(). Widening it from
5
target_ulong to vaddr reduces the target dependence of these functions
6
and is a step towards building accel/ once for system mode.
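As a rough illustration of the effect (env and addr are assumed locals, not from this patch; the call is illustrative only):

    /* softmmu: abi_ptr is now the target-independent vaddr */
    typedef vaddr abi_ptr;

    /* so guest accessors such as cpu_ldub_data() take a vaddr-wide address */
    uint32_t val = cpu_ldub_data(env, (abi_ptr)addr);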
7
8
Signed-off-by: Anton Johansson <anjo@rev.ng>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-Id: <20230807155706.9580-7-anjo@rev.ng>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
6
---
13
include/exec/cpu_ldst.h | 4 ++--
7
tcg/optimize.c | 4 ++--
14
1 file changed, 2 insertions(+), 2 deletions(-)
8
1 file changed, 2 insertions(+), 2 deletions(-)
15
9
16
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
18
--- a/include/exec/cpu_ldst.h
12
--- a/tcg/optimize.c
19
+++ b/include/exec/cpu_ldst.h
13
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ static inline bool guest_range_valid_untagged(abi_ulong start, abi_ulong len)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
21
h2g_nocheck(x); \
15
g_assert_not_reached();
22
})
16
}
23
#else
17
24
-typedef target_ulong abi_ptr;
18
- if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
25
-#define TARGET_ABI_FMT_ptr TARGET_FMT_lx
19
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
26
+typedef vaddr abi_ptr;
20
return true;
27
+#define TARGET_ABI_FMT_ptr "%016" VADDR_PRIx
21
}
28
#endif
22
29
23
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
30
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr);
24
s_mask = s_mask_old >> pos;
25
s_mask |= -1ull << (len - 1);
26
27
- if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
28
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
29
return true;
30
}
31
31
--
32
--
32
2.34.1
33
2.43.0
diff view generated by jsdifflib
1
Introduce a new opcode for negative setcond.
1
The big comment just above says functions should be sorted.
2
Add forward declarations as needed.
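For illustration, a hedged sketch of what negsetcond computes and the generic fallback used when a backend does not provide it (mirroring the tcg-op.c hunk below; dest, arg1 and arg2 are assumed temporaries):

    /* negsetcond: dest = -(arg1 cond arg2), i.e. all-ones or zero */
    tcg_gen_negsetcond_i32(TCG_COND_LT, dest, arg1, arg2);

    /* expansion when TCG_TARGET_HAS_negsetcond_i32 is 0 */
    tcg_gen_setcond_i32(TCG_COND_LT, dest, arg1, arg2);
    tcg_gen_neg_i32(dest, dest);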
2
3
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
docs/devel/tcg-ops.rst | 6 ++++++
7
tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
7
include/tcg/tcg-op-common.h | 4 ++++
8
1 file changed, 59 insertions(+), 55 deletions(-)
8
include/tcg/tcg-op.h | 2 ++
9
include/tcg/tcg-opc.h | 2 ++
10
include/tcg/tcg.h | 1 +
11
tcg/aarch64/tcg-target.h | 2 ++
12
tcg/arm/tcg-target.h | 1 +
13
tcg/i386/tcg-target.h | 2 ++
14
tcg/loongarch64/tcg-target.h | 3 +++
15
tcg/mips/tcg-target.h | 2 ++
16
tcg/ppc/tcg-target.h | 2 ++
17
tcg/riscv/tcg-target.h | 2 ++
18
tcg/s390x/tcg-target.h | 2 ++
19
tcg/sparc64/tcg-target.h | 2 ++
20
tcg/tci/tcg-target.h | 2 ++
21
tcg/optimize.c | 41 +++++++++++++++++++++++++++++++++++-
22
tcg/tcg-op.c | 36 +++++++++++++++++++++++++++++++
23
tcg/tcg.c | 6 ++++++
24
18 files changed, 117 insertions(+), 1 deletion(-)
25
9
26
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
27
index XXXXXXX..XXXXXXX 100644
28
--- a/docs/devel/tcg-ops.rst
29
+++ b/docs/devel/tcg-ops.rst
30
@@ -XXX,XX +XXX,XX @@ Conditional moves
31
|
32
| Set *dest* to 1 if (*t1* *cond* *t2*) is true, otherwise set to 0.
33
34
+ * - negsetcond_i32/i64 *dest*, *t1*, *t2*, *cond*
35
+
36
+ - | *dest* = -(*t1* *cond* *t2*)
37
+ |
38
+ | Set *dest* to -1 if (*t1* *cond* *t2*) is true, otherwise set to 0.
39
+
40
* - movcond_i32/i64 *dest*, *c1*, *c2*, *v1*, *v2*, *cond*
41
42
- | *dest* = (*c1* *cond* *c2* ? *v1* : *v2*)
43
diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h
44
index XXXXXXX..XXXXXXX 100644
45
--- a/include/tcg/tcg-op-common.h
46
+++ b/include/tcg/tcg-op-common.h
47
@@ -XXX,XX +XXX,XX @@ void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
48
TCGv_i32 arg1, TCGv_i32 arg2);
49
void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
50
TCGv_i32 arg1, int32_t arg2);
51
+void tcg_gen_negsetcond_i32(TCGCond cond, TCGv_i32 ret,
52
+ TCGv_i32 arg1, TCGv_i32 arg2);
53
void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
54
TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2);
55
void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
56
@@ -XXX,XX +XXX,XX @@ void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
57
TCGv_i64 arg1, TCGv_i64 arg2);
58
void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
59
TCGv_i64 arg1, int64_t arg2);
60
+void tcg_gen_negsetcond_i64(TCGCond cond, TCGv_i64 ret,
61
+ TCGv_i64 arg1, TCGv_i64 arg2);
62
void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
63
TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2);
64
void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
65
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
66
index XXXXXXX..XXXXXXX 100644
67
--- a/include/tcg/tcg-op.h
68
+++ b/include/tcg/tcg-op.h
69
@@ -XXX,XX +XXX,XX @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64)
70
#define tcg_gen_brcondi_tl tcg_gen_brcondi_i64
71
#define tcg_gen_setcond_tl tcg_gen_setcond_i64
72
#define tcg_gen_setcondi_tl tcg_gen_setcondi_i64
73
+#define tcg_gen_negsetcond_tl tcg_gen_negsetcond_i64
74
#define tcg_gen_mul_tl tcg_gen_mul_i64
75
#define tcg_gen_muli_tl tcg_gen_muli_i64
76
#define tcg_gen_div_tl tcg_gen_div_i64
77
@@ -XXX,XX +XXX,XX @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64)
78
#define tcg_gen_brcondi_tl tcg_gen_brcondi_i32
79
#define tcg_gen_setcond_tl tcg_gen_setcond_i32
80
#define tcg_gen_setcondi_tl tcg_gen_setcondi_i32
81
+#define tcg_gen_negsetcond_tl tcg_gen_negsetcond_i32
82
#define tcg_gen_mul_tl tcg_gen_mul_i32
83
#define tcg_gen_muli_tl tcg_gen_muli_i32
84
#define tcg_gen_div_tl tcg_gen_div_i32
85
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
86
index XXXXXXX..XXXXXXX 100644
87
--- a/include/tcg/tcg-opc.h
88
+++ b/include/tcg/tcg-opc.h
89
@@ -XXX,XX +XXX,XX @@ DEF(mb, 0, 0, 1, 0)
90
91
DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT)
92
DEF(setcond_i32, 1, 2, 1, 0)
93
+DEF(negsetcond_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_negsetcond_i32))
94
DEF(movcond_i32, 1, 4, 1, IMPL(TCG_TARGET_HAS_movcond_i32))
95
/* load/store */
96
DEF(ld8u_i32, 1, 1, 1, 0)
97
@@ -XXX,XX +XXX,XX @@ DEF(ctpop_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ctpop_i32))
98
99
DEF(mov_i64, 1, 1, 0, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
100
DEF(setcond_i64, 1, 2, 1, IMPL64)
101
+DEF(negsetcond_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_negsetcond_i64))
102
DEF(movcond_i64, 1, 4, 1, IMPL64 | IMPL(TCG_TARGET_HAS_movcond_i64))
103
/* load/store */
104
DEF(ld8u_i64, 1, 1, 1, IMPL64)
105
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
106
index XXXXXXX..XXXXXXX 100644
107
--- a/include/tcg/tcg.h
108
+++ b/include/tcg/tcg.h
109
@@ -XXX,XX +XXX,XX @@ typedef uint64_t TCGRegSet;
110
#define TCG_TARGET_HAS_sextract_i64 0
111
#define TCG_TARGET_HAS_extract2_i64 0
112
#define TCG_TARGET_HAS_movcond_i64 0
113
+#define TCG_TARGET_HAS_negsetcond_i64 0
114
#define TCG_TARGET_HAS_add2_i64 0
115
#define TCG_TARGET_HAS_sub2_i64 0
116
#define TCG_TARGET_HAS_mulu2_i64 0
117
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
118
index XXXXXXX..XXXXXXX 100644
119
--- a/tcg/aarch64/tcg-target.h
120
+++ b/tcg/aarch64/tcg-target.h
121
@@ -XXX,XX +XXX,XX @@ typedef enum {
122
#define TCG_TARGET_HAS_sextract_i32 1
123
#define TCG_TARGET_HAS_extract2_i32 1
124
#define TCG_TARGET_HAS_movcond_i32 1
125
+#define TCG_TARGET_HAS_negsetcond_i32 0
126
#define TCG_TARGET_HAS_add2_i32 1
127
#define TCG_TARGET_HAS_sub2_i32 1
128
#define TCG_TARGET_HAS_mulu2_i32 0
129
@@ -XXX,XX +XXX,XX @@ typedef enum {
130
#define TCG_TARGET_HAS_sextract_i64 1
131
#define TCG_TARGET_HAS_extract2_i64 1
132
#define TCG_TARGET_HAS_movcond_i64 1
133
+#define TCG_TARGET_HAS_negsetcond_i64 0
134
#define TCG_TARGET_HAS_add2_i64 1
135
#define TCG_TARGET_HAS_sub2_i64 1
136
#define TCG_TARGET_HAS_mulu2_i64 0
137
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
138
index XXXXXXX..XXXXXXX 100644
139
--- a/tcg/arm/tcg-target.h
140
+++ b/tcg/arm/tcg-target.h
141
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
142
#define TCG_TARGET_HAS_sextract_i32 use_armv7_instructions
143
#define TCG_TARGET_HAS_extract2_i32 1
144
#define TCG_TARGET_HAS_movcond_i32 1
145
+#define TCG_TARGET_HAS_negsetcond_i32 0
146
#define TCG_TARGET_HAS_mulu2_i32 1
147
#define TCG_TARGET_HAS_muls2_i32 1
148
#define TCG_TARGET_HAS_muluh_i32 0
149
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
150
index XXXXXXX..XXXXXXX 100644
151
--- a/tcg/i386/tcg-target.h
152
+++ b/tcg/i386/tcg-target.h
153
@@ -XXX,XX +XXX,XX @@ typedef enum {
154
#define TCG_TARGET_HAS_sextract_i32 1
155
#define TCG_TARGET_HAS_extract2_i32 1
156
#define TCG_TARGET_HAS_movcond_i32 1
157
+#define TCG_TARGET_HAS_negsetcond_i32 0
158
#define TCG_TARGET_HAS_add2_i32 1
159
#define TCG_TARGET_HAS_sub2_i32 1
160
#define TCG_TARGET_HAS_mulu2_i32 1
161
@@ -XXX,XX +XXX,XX @@ typedef enum {
162
#define TCG_TARGET_HAS_sextract_i64 0
163
#define TCG_TARGET_HAS_extract2_i64 1
164
#define TCG_TARGET_HAS_movcond_i64 1
165
+#define TCG_TARGET_HAS_negsetcond_i64 0
166
#define TCG_TARGET_HAS_add2_i64 1
167
#define TCG_TARGET_HAS_sub2_i64 1
168
#define TCG_TARGET_HAS_mulu2_i64 1
169
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
170
index XXXXXXX..XXXXXXX 100644
171
--- a/tcg/loongarch64/tcg-target.h
172
+++ b/tcg/loongarch64/tcg-target.h
173
@@ -XXX,XX +XXX,XX @@ typedef enum {
174
175
/* optional instructions */
176
#define TCG_TARGET_HAS_movcond_i32 1
177
+#define TCG_TARGET_HAS_negsetcond_i32 0
178
#define TCG_TARGET_HAS_div_i32 1
179
#define TCG_TARGET_HAS_rem_i32 1
180
#define TCG_TARGET_HAS_div2_i32 0
181
@@ -XXX,XX +XXX,XX @@ typedef enum {
182
183
/* 64-bit operations */
184
#define TCG_TARGET_HAS_movcond_i64 1
185
+#define TCG_TARGET_HAS_negsetcond_i64 0
186
#define TCG_TARGET_HAS_div_i64 1
187
#define TCG_TARGET_HAS_rem_i64 1
188
#define TCG_TARGET_HAS_div2_i64 0
189
@@ -XXX,XX +XXX,XX @@ typedef enum {
190
#define TCG_TARGET_HAS_muls2_i64 0
191
#define TCG_TARGET_HAS_muluh_i64 1
192
#define TCG_TARGET_HAS_mulsh_i64 1
193
+
194
#define TCG_TARGET_HAS_qemu_ldst_i128 0
195
196
#define TCG_TARGET_DEFAULT_MO (0)
197
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
198
index XXXXXXX..XXXXXXX 100644
199
--- a/tcg/mips/tcg-target.h
200
+++ b/tcg/mips/tcg-target.h
201
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
202
#define TCG_TARGET_HAS_muluh_i32 1
203
#define TCG_TARGET_HAS_mulsh_i32 1
204
#define TCG_TARGET_HAS_bswap32_i32 1
205
+#define TCG_TARGET_HAS_negsetcond_i32 0
206
207
#if TCG_TARGET_REG_BITS == 64
208
#define TCG_TARGET_HAS_add2_i32 0
209
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
210
#define TCG_TARGET_HAS_mulsh_i64 1
211
#define TCG_TARGET_HAS_ext32s_i64 1
212
#define TCG_TARGET_HAS_ext32u_i64 1
213
+#define TCG_TARGET_HAS_negsetcond_i64 0
214
#endif
215
216
/* optional instructions detected at runtime */
217
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
218
index XXXXXXX..XXXXXXX 100644
219
--- a/tcg/ppc/tcg-target.h
220
+++ b/tcg/ppc/tcg-target.h
221
@@ -XXX,XX +XXX,XX @@ typedef enum {
222
#define TCG_TARGET_HAS_sextract_i32 0
223
#define TCG_TARGET_HAS_extract2_i32 0
224
#define TCG_TARGET_HAS_movcond_i32 1
225
+#define TCG_TARGET_HAS_negsetcond_i32 0
226
#define TCG_TARGET_HAS_mulu2_i32 0
227
#define TCG_TARGET_HAS_muls2_i32 0
228
#define TCG_TARGET_HAS_muluh_i32 1
229
@@ -XXX,XX +XXX,XX @@ typedef enum {
230
#define TCG_TARGET_HAS_sextract_i64 0
231
#define TCG_TARGET_HAS_extract2_i64 0
232
#define TCG_TARGET_HAS_movcond_i64 1
233
+#define TCG_TARGET_HAS_negsetcond_i64 0
234
#define TCG_TARGET_HAS_add2_i64 1
235
#define TCG_TARGET_HAS_sub2_i64 1
236
#define TCG_TARGET_HAS_mulu2_i64 0
237
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
238
index XXXXXXX..XXXXXXX 100644
239
--- a/tcg/riscv/tcg-target.h
240
+++ b/tcg/riscv/tcg-target.h
241
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
242
243
/* optional instructions */
244
#define TCG_TARGET_HAS_movcond_i32 1
245
+#define TCG_TARGET_HAS_negsetcond_i32 0
246
#define TCG_TARGET_HAS_div_i32 1
247
#define TCG_TARGET_HAS_rem_i32 1
248
#define TCG_TARGET_HAS_div2_i32 0
249
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
250
#define TCG_TARGET_HAS_qemu_st8_i32 0
251
252
#define TCG_TARGET_HAS_movcond_i64 1
253
+#define TCG_TARGET_HAS_negsetcond_i64 0
254
#define TCG_TARGET_HAS_div_i64 1
255
#define TCG_TARGET_HAS_rem_i64 1
256
#define TCG_TARGET_HAS_div2_i64 0
257
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
258
index XXXXXXX..XXXXXXX 100644
259
--- a/tcg/s390x/tcg-target.h
260
+++ b/tcg/s390x/tcg-target.h
261
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
262
#define TCG_TARGET_HAS_sextract_i32 0
263
#define TCG_TARGET_HAS_extract2_i32 0
264
#define TCG_TARGET_HAS_movcond_i32 1
265
+#define TCG_TARGET_HAS_negsetcond_i32 0
266
#define TCG_TARGET_HAS_add2_i32 1
267
#define TCG_TARGET_HAS_sub2_i32 1
268
#define TCG_TARGET_HAS_mulu2_i32 0
269
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
270
#define TCG_TARGET_HAS_sextract_i64 0
271
#define TCG_TARGET_HAS_extract2_i64 0
272
#define TCG_TARGET_HAS_movcond_i64 1
273
+#define TCG_TARGET_HAS_negsetcond_i64 0
274
#define TCG_TARGET_HAS_add2_i64 1
275
#define TCG_TARGET_HAS_sub2_i64 1
276
#define TCG_TARGET_HAS_mulu2_i64 1
277
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
278
index XXXXXXX..XXXXXXX 100644
279
--- a/tcg/sparc64/tcg-target.h
280
+++ b/tcg/sparc64/tcg-target.h
281
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
282
#define TCG_TARGET_HAS_sextract_i32 0
283
#define TCG_TARGET_HAS_extract2_i32 0
284
#define TCG_TARGET_HAS_movcond_i32 1
285
+#define TCG_TARGET_HAS_negsetcond_i32 0
286
#define TCG_TARGET_HAS_add2_i32 1
287
#define TCG_TARGET_HAS_sub2_i32 1
288
#define TCG_TARGET_HAS_mulu2_i32 1
289
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
290
#define TCG_TARGET_HAS_sextract_i64 0
291
#define TCG_TARGET_HAS_extract2_i64 0
292
#define TCG_TARGET_HAS_movcond_i64 1
293
+#define TCG_TARGET_HAS_negsetcond_i64 0
294
#define TCG_TARGET_HAS_add2_i64 1
295
#define TCG_TARGET_HAS_sub2_i64 1
296
#define TCG_TARGET_HAS_mulu2_i64 0
297
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
298
index XXXXXXX..XXXXXXX 100644
299
--- a/tcg/tci/tcg-target.h
300
+++ b/tcg/tci/tcg-target.h
301
@@ -XXX,XX +XXX,XX @@
302
#define TCG_TARGET_HAS_orc_i32 1
303
#define TCG_TARGET_HAS_rot_i32 1
304
#define TCG_TARGET_HAS_movcond_i32 1
305
+#define TCG_TARGET_HAS_negsetcond_i32 0
306
#define TCG_TARGET_HAS_muls2_i32 1
307
#define TCG_TARGET_HAS_muluh_i32 0
308
#define TCG_TARGET_HAS_mulsh_i32 0
309
@@ -XXX,XX +XXX,XX @@
310
#define TCG_TARGET_HAS_orc_i64 1
311
#define TCG_TARGET_HAS_rot_i64 1
312
#define TCG_TARGET_HAS_movcond_i64 1
313
+#define TCG_TARGET_HAS_negsetcond_i64 0
314
#define TCG_TARGET_HAS_muls2_i64 1
315
#define TCG_TARGET_HAS_add2_i32 1
316
#define TCG_TARGET_HAS_sub2_i32 1
317
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
318
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
319
--- a/tcg/optimize.c
12
--- a/tcg/optimize.c
320
+++ b/tcg/optimize.c
13
+++ b/tcg/optimize.c
321
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
322
if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
15
* 3) those that produce information about the result value.
323
uint64_t tv = arg_info(op->args[3])->val;
16
*/
324
uint64_t fv = arg_info(op->args[4])->val;
17
325
- TCGOpcode opc;
18
+static bool fold_or(OptContext *ctx, TCGOp *op);
326
+ TCGOpcode opc, negopc = 0;
19
+static bool fold_orc(OptContext *ctx, TCGOp *op);
327
20
+static bool fold_xor(OptContext *ctx, TCGOp *op);
328
switch (ctx->type) {
21
+
329
case TCG_TYPE_I32:
22
static bool fold_add(OptContext *ctx, TCGOp *op)
330
opc = INDEX_op_setcond_i32;
23
{
331
+ if (TCG_TARGET_HAS_negsetcond_i32) {
24
if (fold_const2_commutative(ctx, op) ||
332
+ negopc = INDEX_op_negsetcond_i32;
25
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
333
+ }
26
return fold_masks_zs(ctx, op, z_mask, s_mask);
334
+ tv = (int32_t)tv;
335
+ fv = (int32_t)fv;
336
break;
337
case TCG_TYPE_I64:
338
opc = INDEX_op_setcond_i64;
339
+ if (TCG_TARGET_HAS_negsetcond_i64) {
340
+ negopc = INDEX_op_negsetcond_i64;
341
+ }
342
break;
343
default:
344
g_assert_not_reached();
345
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
346
} else if (fv == 1 && tv == 0) {
347
op->opc = opc;
348
op->args[3] = tcg_invert_cond(cond);
349
+ } else if (negopc) {
350
+ if (tv == -1 && fv == 0) {
351
+ op->opc = negopc;
352
+ op->args[3] = cond;
353
+ } else if (fv == -1 && tv == 0) {
354
+ op->opc = negopc;
355
+ op->args[3] = tcg_invert_cond(cond);
356
+ }
357
}
358
}
359
return false;
360
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
361
return false;
362
}
27
}
363
28
364
+static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
29
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
365
+{
30
+{
366
+ TCGCond cond = op->args[3];
31
+ /* If true and false values are the same, eliminate the cmp. */
367
+ int i;
32
+ if (args_are_copies(op->args[2], op->args[3])) {
368
+
33
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
369
+ if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
370
+ op->args[3] = cond = tcg_swap_cond(cond);
371
+ }
34
+ }
372
+
35
+
373
+ i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
36
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
374
+ if (i >= 0) {
37
+ uint64_t tv = arg_info(op->args[2])->val;
375
+ return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
38
+ uint64_t fv = arg_info(op->args[3])->val;
39
+
40
+ if (tv == -1 && fv == 0) {
41
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
42
+ }
43
+ if (tv == 0 && fv == -1) {
44
+ if (TCG_TARGET_HAS_not_vec) {
45
+ op->opc = INDEX_op_not_vec;
46
+ return fold_not(ctx, op);
47
+ } else {
48
+ op->opc = INDEX_op_xor_vec;
49
+ op->args[2] = arg_new_constant(ctx, -1);
50
+ return fold_xor(ctx, op);
51
+ }
52
+ }
376
+ }
53
+ }
377
+
54
+ if (arg_is_const(op->args[2])) {
378
+ /* Value is {0,-1} so all bits are repetitions of the sign. */
55
+ uint64_t tv = arg_info(op->args[2])->val;
379
+ ctx->s_mask = -1;
56
+ if (tv == -1) {
380
+ return false;
57
+ op->opc = INDEX_op_or_vec;
58
+ op->args[2] = op->args[3];
59
+ return fold_or(ctx, op);
60
+ }
61
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
62
+ op->opc = INDEX_op_andc_vec;
63
+ op->args[2] = op->args[1];
64
+ op->args[1] = op->args[3];
65
+ return fold_andc(ctx, op);
66
+ }
67
+ }
68
+ if (arg_is_const(op->args[3])) {
69
+ uint64_t fv = arg_info(op->args[3])->val;
70
+ if (fv == 0) {
71
+ op->opc = INDEX_op_and_vec;
72
+ return fold_and(ctx, op);
73
+ }
74
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
75
+ op->opc = INDEX_op_orc_vec;
76
+ op->args[2] = op->args[1];
77
+ op->args[1] = op->args[3];
78
+ return fold_orc(ctx, op);
79
+ }
80
+ }
81
+ return finish_folding(ctx, op);
381
+}
82
+}
382
+
83
+
383
+
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
384
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
385
{
85
{
386
TCGCond cond = op->args[5];
86
int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
387
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
87
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
388
CASE_OP_32_64(setcond):
88
return fold_masks_zs(ctx, op, z_mask, s_mask);
389
done = fold_setcond(&ctx, op);
390
break;
391
+ CASE_OP_32_64(negsetcond):
392
+ done = fold_negsetcond(&ctx, op);
393
+ break;
394
case INDEX_op_setcond2_i32:
395
done = fold_setcond2(&ctx, op);
396
break;
397
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
398
index XXXXXXX..XXXXXXX 100644
399
--- a/tcg/tcg-op.c
400
+++ b/tcg/tcg-op.c
401
@@ -XXX,XX +XXX,XX @@ void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
402
tcg_gen_setcond_i32(cond, ret, arg1, tcg_constant_i32(arg2));
403
}
89
}
404
90
405
+void tcg_gen_negsetcond_i32(TCGCond cond, TCGv_i32 ret,
91
-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
406
+ TCGv_i32 arg1, TCGv_i32 arg2)
92
-{
407
+{
93
- /* If true and false values are the same, eliminate the cmp. */
408
+ if (cond == TCG_COND_ALWAYS) {
94
- if (args_are_copies(op->args[2], op->args[3])) {
409
+ tcg_gen_movi_i32(ret, -1);
95
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
410
+ } else if (cond == TCG_COND_NEVER) {
96
- }
411
+ tcg_gen_movi_i32(ret, 0);
97
-
412
+ } else if (TCG_TARGET_HAS_negsetcond_i32) {
98
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
413
+ tcg_gen_op4i_i32(INDEX_op_negsetcond_i32, ret, arg1, arg2, cond);
99
- uint64_t tv = arg_info(op->args[2])->val;
414
+ } else {
100
- uint64_t fv = arg_info(op->args[3])->val;
415
+ tcg_gen_setcond_i32(cond, ret, arg1, arg2);
101
-
416
+ tcg_gen_neg_i32(ret, ret);
102
- if (tv == -1 && fv == 0) {
417
+ }
103
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
418
+}
104
- }
419
+
105
- if (tv == 0 && fv == -1) {
420
void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
106
- if (TCG_TARGET_HAS_not_vec) {
107
- op->opc = INDEX_op_not_vec;
108
- return fold_not(ctx, op);
109
- } else {
110
- op->opc = INDEX_op_xor_vec;
111
- op->args[2] = arg_new_constant(ctx, -1);
112
- return fold_xor(ctx, op);
113
- }
114
- }
115
- }
116
- if (arg_is_const(op->args[2])) {
117
- uint64_t tv = arg_info(op->args[2])->val;
118
- if (tv == -1) {
119
- op->opc = INDEX_op_or_vec;
120
- op->args[2] = op->args[3];
121
- return fold_or(ctx, op);
122
- }
123
- if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
124
- op->opc = INDEX_op_andc_vec;
125
- op->args[2] = op->args[1];
126
- op->args[1] = op->args[3];
127
- return fold_andc(ctx, op);
128
- }
129
- }
130
- if (arg_is_const(op->args[3])) {
131
- uint64_t fv = arg_info(op->args[3])->val;
132
- if (fv == 0) {
133
- op->opc = INDEX_op_and_vec;
134
- return fold_and(ctx, op);
135
- }
136
- if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
137
- op->opc = INDEX_op_orc_vec;
138
- op->args[2] = op->args[1];
139
- op->args[1] = op->args[3];
140
- return fold_orc(ctx, op);
141
- }
142
- }
143
- return finish_folding(ctx, op);
144
-}
145
-
146
/* Propagate constants and copies, fold constant expressions. */
147
void tcg_optimize(TCGContext *s)
421
{
148
{
422
if (arg2 == 0) {
423
@@ -XXX,XX +XXX,XX @@ void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
424
}
425
}
426
427
+void tcg_gen_negsetcond_i64(TCGCond cond, TCGv_i64 ret,
428
+ TCGv_i64 arg1, TCGv_i64 arg2)
429
+{
430
+ if (cond == TCG_COND_ALWAYS) {
431
+ tcg_gen_movi_i64(ret, -1);
432
+ } else if (cond == TCG_COND_NEVER) {
433
+ tcg_gen_movi_i64(ret, 0);
434
+ } else if (TCG_TARGET_HAS_negsetcond_i64) {
435
+ tcg_gen_op4i_i64(INDEX_op_negsetcond_i64, ret, arg1, arg2, cond);
436
+ } else if (TCG_TARGET_REG_BITS == 32) {
437
+ tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret),
438
+ TCGV_LOW(arg1), TCGV_HIGH(arg1),
439
+ TCGV_LOW(arg2), TCGV_HIGH(arg2), cond);
440
+ tcg_gen_neg_i32(TCGV_LOW(ret), TCGV_LOW(ret));
441
+ tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_LOW(ret));
442
+ } else {
443
+ tcg_gen_setcond_i64(cond, ret, arg1, arg2);
444
+ tcg_gen_neg_i64(ret, ret);
445
+ }
446
+}
447
+
448
void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
449
{
450
if (arg2 == 0) {
451
diff --git a/tcg/tcg.c b/tcg/tcg.c
452
index XXXXXXX..XXXXXXX 100644
453
--- a/tcg/tcg.c
454
+++ b/tcg/tcg.c
455
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op)
456
case INDEX_op_sar_i32:
457
return true;
458
459
+ case INDEX_op_negsetcond_i32:
460
+ return TCG_TARGET_HAS_negsetcond_i32;
461
case INDEX_op_movcond_i32:
462
return TCG_TARGET_HAS_movcond_i32;
463
case INDEX_op_div_i32:
464
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op)
465
case INDEX_op_extu_i32_i64:
466
return TCG_TARGET_REG_BITS == 64;
467
468
+ case INDEX_op_negsetcond_i64:
469
+ return TCG_TARGET_HAS_negsetcond_i64;
470
case INDEX_op_movcond_i64:
471
return TCG_TARGET_HAS_movcond_i64;
472
case INDEX_op_div_i64:
473
@@ -XXX,XX +XXX,XX @@ static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
474
switch (c) {
475
case INDEX_op_brcond_i32:
476
case INDEX_op_setcond_i32:
477
+ case INDEX_op_negsetcond_i32:
478
case INDEX_op_movcond_i32:
479
case INDEX_op_brcond2_i32:
480
case INDEX_op_setcond2_i32:
481
case INDEX_op_brcond_i64:
482
case INDEX_op_setcond_i64:
483
+ case INDEX_op_negsetcond_i64:
484
case INDEX_op_movcond_i64:
485
case INDEX_op_cmp_vec:
486
case INDEX_op_cmpsel_vec:
487
--
149
--
488
2.34.1
150
2.43.0
1
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
The big comment just above says functions should be sorted.
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
tcg/riscv/tcg-target.h | 4 ++--
6
tcg/optimize.c | 60 +++++++++++++++++++++++++-------------------------
5
tcg/riscv/tcg-target.c.inc | 45 ++++++++++++++++++++++++++++++++++++++
7
1 file changed, 30 insertions(+), 30 deletions(-)
6
2 files changed, 47 insertions(+), 2 deletions(-)
7
8
8
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/riscv/tcg-target.h
11
--- a/tcg/optimize.c
11
+++ b/tcg/riscv/tcg-target.h
12
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
13
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
13
14
return true;
14
/* optional instructions */
15
#define TCG_TARGET_HAS_movcond_i32 1
16
-#define TCG_TARGET_HAS_negsetcond_i32 0
17
+#define TCG_TARGET_HAS_negsetcond_i32 1
18
#define TCG_TARGET_HAS_div_i32 1
19
#define TCG_TARGET_HAS_rem_i32 1
20
#define TCG_TARGET_HAS_div2_i32 0
21
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
22
#define TCG_TARGET_HAS_qemu_st8_i32 0
23
24
#define TCG_TARGET_HAS_movcond_i64 1
25
-#define TCG_TARGET_HAS_negsetcond_i64 0
26
+#define TCG_TARGET_HAS_negsetcond_i64 1
27
#define TCG_TARGET_HAS_div_i64 1
28
#define TCG_TARGET_HAS_rem_i64 1
29
#define TCG_TARGET_HAS_div2_i64 0
30
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
31
index XXXXXXX..XXXXXXX 100644
32
--- a/tcg/riscv/tcg-target.c.inc
33
+++ b/tcg/riscv/tcg-target.c.inc
34
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
35
}
36
}
15
}
37
16
38
+static void tcg_out_negsetcond(TCGContext *s, TCGCond cond, TCGReg ret,
17
+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
39
+ TCGReg arg1, tcg_target_long arg2, bool c2)
40
+{
18
+{
41
+ int tmpflags;
19
+ /* Canonicalize the comparison to put immediate second. */
42
+ TCGReg tmp;
20
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
21
+ op->args[3] = tcg_swap_cond(op->args[3]);
22
+ }
23
+ return finish_folding(ctx, op);
24
+}
43
+
25
+
44
+ /* For LT/GE comparison against 0, replicate the sign bit. */
26
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
45
+ if (c2 && arg2 == 0) {
27
+{
46
+ switch (cond) {
28
+ /* If true and false values are the same, eliminate the cmp. */
47
+ case TCG_COND_GE:
29
+ if (args_are_copies(op->args[3], op->args[4])) {
48
+ tcg_out_opc_imm(s, OPC_XORI, ret, arg1, -1);
30
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
49
+ arg1 = ret;
50
+ /* fall through */
51
+ case TCG_COND_LT:
52
+ tcg_out_opc_imm(s, OPC_SRAI, ret, arg1, TCG_TARGET_REG_BITS - 1);
53
+ return;
54
+ default:
55
+ break;
56
+ }
57
+ }
31
+ }
58
+
32
+
59
+ tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
33
+ /* Canonicalize the comparison to put immediate second. */
60
+ tmp = tmpflags & ~SETCOND_FLAGS;
34
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
61
+
35
+ op->args[5] = tcg_swap_cond(op->args[5]);
62
+ /* If intermediate result is zero/non-zero: test != 0. */
63
+ if (tmpflags & SETCOND_NEZ) {
64
+ tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, tmp);
65
+ tmp = ret;
66
+ }
36
+ }
67
+
37
+ /*
68
+ /* Produce the 0/-1 result. */
38
+ * Canonicalize the "false" input reg to match the destination,
69
+ if (tmpflags & SETCOND_INV) {
39
+ * so that the tcg backend can implement "move if true".
70
+ tcg_out_opc_imm(s, OPC_ADDI, ret, tmp, -1);
40
+ */
71
+ } else {
41
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
72
+ tcg_out_opc_reg(s, OPC_SUB, ret, TCG_REG_ZERO, tmp);
42
+ op->args[5] = tcg_invert_cond(op->args[5]);
73
+ }
43
+ }
44
+ return finish_folding(ctx, op);
74
+}
45
+}
75
+
46
+
76
static void tcg_out_movcond_zicond(TCGContext *s, TCGReg ret, TCGReg test_ne,
47
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
77
int val1, bool c_val1,
48
{
78
int val2, bool c_val2)
49
uint64_t z_mask, s_mask;
79
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
50
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
80
tcg_out_setcond(s, args[3], a0, a1, a2, c2);
51
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
81
break;
52
}
82
53
83
+ case INDEX_op_negsetcond_i32:
54
-static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
84
+ case INDEX_op_negsetcond_i64:
55
-{
85
+ tcg_out_negsetcond(s, args[3], a0, a1, a2, c2);
56
- /* Canonicalize the comparison to put immediate second. */
86
+ break;
57
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
87
+
58
- op->args[3] = tcg_swap_cond(op->args[3]);
88
case INDEX_op_movcond_i32:
59
- }
89
case INDEX_op_movcond_i64:
60
- return finish_folding(ctx, op);
90
tcg_out_movcond(s, args[5], a0, a1, a2, c2,
61
-}
91
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
62
-
92
case INDEX_op_xor_i64:
63
-static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
93
case INDEX_op_setcond_i32:
64
-{
94
case INDEX_op_setcond_i64:
65
- /* If true and false values are the same, eliminate the cmp. */
95
+ case INDEX_op_negsetcond_i32:
66
- if (args_are_copies(op->args[3], op->args[4])) {
96
+ case INDEX_op_negsetcond_i64:
67
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
97
return C_O1_I2(r, r, rI);
68
- }
98
69
-
99
case INDEX_op_andc_i32:
70
- /* Canonicalize the comparison to put immediate second. */
71
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
72
- op->args[5] = tcg_swap_cond(op->args[5]);
73
- }
74
- /*
75
- * Canonicalize the "false" input reg to match the destination,
76
- * so that the tcg backend can implement "move if true".
77
- */
78
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
79
- op->args[5] = tcg_invert_cond(op->args[5]);
80
- }
81
- return finish_folding(ctx, op);
82
-}
83
-
84
static bool fold_sextract(OptContext *ctx, TCGOp *op)
85
{
86
uint64_t z_mask, s_mask, s_mask_old;
100
--
87
--
101
2.34.1
88
2.43.0
1
The SETBC family of instructions requires exactly two insns for
1
We currently have a flag, float_muladd_halve_result, to scale
2
all comparisons, saving 0-3 insns per (neg)setcond.
2
the result by 2**-1. Extend this to handle arbitrary scaling.
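Concretely, a sketch based on the conversions later in this series (a, b, c, flags, s and r are placeholders):

    /* old: request the 2**-1 scaling via a flag */
    r = float32_muladd(a, b, c, flags | float_muladd_halve_result, s);

    /* new: pass an explicit exponent adjustment instead */
    r = float32_muladd_scalbn(a, b, c, -1, flags, s);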
3
3
4
Tested-by: Nicholas Piggin <npiggin@gmail.com>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/ppc/tcg-target.c.inc | 22 ++++++++++++++++++++++
7
include/fpu/softfloat.h | 6 ++++
9
1 file changed, 22 insertions(+)
8
fpu/softfloat.c | 58 ++++++++++++++++++++++-----------------
10
9
fpu/softfloat-parts.c.inc | 7 +++--
11
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
10
3 files changed, 44 insertions(+), 27 deletions(-)
11
12
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/ppc/tcg-target.c.inc
14
--- a/include/fpu/softfloat.h
14
+++ b/tcg/ppc/tcg-target.c.inc
15
+++ b/include/fpu/softfloat.h
15
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
16
@@ -XXX,XX +XXX,XX @@ float16 float16_add(float16, float16, float_status *status);
16
#define TW XO31( 4)
17
float16 float16_sub(float16, float16, float_status *status);
17
#define TRAP (TW | TO(31))
18
float16 float16_mul(float16, float16, float_status *status);
18
19
float16 float16_muladd(float16, float16, float16, int, float_status *status);
19
+#define SETBC XO31(384) /* v3.10 */
20
+float16 float16_muladd_scalbn(float16, float16, float16,
20
+#define SETBCR XO31(416) /* v3.10 */
21
+ int, int, float_status *status);
21
+#define SETNBC XO31(448) /* v3.10 */
22
float16 float16_div(float16, float16, float_status *status);
22
+#define SETNBCR XO31(480) /* v3.10 */
23
float16 float16_scalbn(float16, int, float_status *status);
24
float16 float16_min(float16, float16, float_status *status);
25
@@ -XXX,XX +XXX,XX @@ float32 float32_mul(float32, float32, float_status *status);
26
float32 float32_div(float32, float32, float_status *status);
27
float32 float32_rem(float32, float32, float_status *status);
28
float32 float32_muladd(float32, float32, float32, int, float_status *status);
29
+float32 float32_muladd_scalbn(float32, float32, float32,
30
+ int, int, float_status *status);
31
float32 float32_sqrt(float32, float_status *status);
32
float32 float32_exp2(float32, float_status *status);
33
float32 float32_log2(float32, float_status *status);
34
@@ -XXX,XX +XXX,XX @@ float64 float64_mul(float64, float64, float_status *status);
35
float64 float64_div(float64, float64, float_status *status);
36
float64 float64_rem(float64, float64, float_status *status);
37
float64 float64_muladd(float64, float64, float64, int, float_status *status);
38
+float64 float64_muladd_scalbn(float64, float64, float64,
39
+ int, int, float_status *status);
40
float64 float64_sqrt(float64, float_status *status);
41
float64 float64_log2(float64, float_status *status);
42
FloatRelation float64_compare(float64, float64, float_status *status);
43
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/fpu/softfloat.c
46
+++ b/fpu/softfloat.c
47
@@ -XXX,XX +XXX,XX @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
48
#define parts_mul(A, B, S) \
49
PARTS_GENERIC_64_128(mul, A)(A, B, S)
50
51
-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
52
- FloatParts64 *c, int flags,
53
- float_status *s);
54
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
55
- FloatParts128 *c, int flags,
56
- float_status *s);
57
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
58
+ FloatParts64 *c, int scale,
59
+ int flags, float_status *s);
60
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
61
+ FloatParts128 *c, int scale,
62
+ int flags, float_status *s);
63
64
-#define parts_muladd(A, B, C, Z, S) \
65
- PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
66
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
67
+ PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)
68
69
static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
70
float_status *s);
71
@@ -XXX,XX +XXX,XX @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
72
* Fused multiply-add
73
*/
74
75
-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
76
- int flags, float_status *status)
77
+float16 QEMU_FLATTEN
78
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
79
+ int scale, int flags, float_status *status)
80
{
81
FloatParts64 pa, pb, pc, *pr;
82
83
float16_unpack_canonical(&pa, a, status);
84
float16_unpack_canonical(&pb, b, status);
85
float16_unpack_canonical(&pc, c, status);
86
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
87
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
88
89
return float16_round_pack_canonical(pr, status);
90
}
91
92
-static float32 QEMU_SOFTFLOAT_ATTR
93
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
94
- float_status *status)
95
+float16 float16_muladd(float16 a, float16 b, float16 c,
96
+ int flags, float_status *status)
97
+{
98
+ return float16_muladd_scalbn(a, b, c, 0, flags, status);
99
+}
23
+
100
+
24
#define NOP ORI /* ori 0,0,0 */
101
+float32 QEMU_SOFTFLOAT_ATTR
25
102
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
26
#define LVX XO31(103)
103
+ int scale, int flags, float_status *status)
27
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
104
{
28
arg2 = (uint32_t)arg2;
105
FloatParts64 pa, pb, pc, *pr;
106
107
float32_unpack_canonical(&pa, a, status);
108
float32_unpack_canonical(&pb, b, status);
109
float32_unpack_canonical(&pc, c, status);
110
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
111
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
112
113
return float32_round_pack_canonical(pr, status);
114
}
115
116
-static float64 QEMU_SOFTFLOAT_ATTR
117
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
118
- float_status *status)
119
+float64 QEMU_SOFTFLOAT_ATTR
120
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
121
+ int scale, int flags, float_status *status)
122
{
123
FloatParts64 pa, pb, pc, *pr;
124
125
float64_unpack_canonical(&pa, a, status);
126
float64_unpack_canonical(&pb, b, status);
127
float64_unpack_canonical(&pc, c, status);
128
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
129
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
130
131
return float64_round_pack_canonical(pr, status);
132
}
133
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
134
return ur.s;
135
136
soft:
137
- return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
138
+ return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
139
}
140
141
float64 QEMU_FLATTEN
142
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
143
return ur.s;
144
145
soft:
146
- return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
147
+ return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
148
}
149
150
float64 float64r32_muladd(float64 a, float64 b, float64 c,
151
@@ -XXX,XX +XXX,XX @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
152
float64_unpack_canonical(&pa, a, status);
153
float64_unpack_canonical(&pb, b, status);
154
float64_unpack_canonical(&pc, c, status);
155
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
156
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
157
158
return float64r32_round_pack_canonical(pr, status);
159
}
160
@@ -XXX,XX +XXX,XX @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
161
bfloat16_unpack_canonical(&pa, a, status);
162
bfloat16_unpack_canonical(&pb, b, status);
163
bfloat16_unpack_canonical(&pc, c, status);
164
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
165
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
166
167
return bfloat16_round_pack_canonical(pr, status);
168
}
169
@@ -XXX,XX +XXX,XX @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
170
float128_unpack_canonical(&pa, a, status);
171
float128_unpack_canonical(&pb, b, status);
172
float128_unpack_canonical(&pc, c, status);
173
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
174
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
175
176
return float128_round_pack_canonical(pr, status);
177
}
178
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
179
180
float64_unpack_canonical(&rp, float64_one, status);
181
for (i = 0 ; i < 15 ; i++) {
182
+
183
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
184
- rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
185
+ rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
186
xnp = *parts_mul(&xnp, &xp, status);
29
}
187
}
30
188
31
+ /* With SETBC/SETBCR, we can always implement with 2 insns. */
189
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
32
+ if (have_isa_3_10) {
190
index XXXXXXX..XXXXXXX 100644
33
+ tcg_insn_unit bi, opc;
191
--- a/fpu/softfloat-parts.c.inc
34
+
192
+++ b/fpu/softfloat-parts.c.inc
35
+ tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
193
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
36
+
194
* Requires A and C extracted into a double-sized structure to provide the
37
+ /* Re-use tcg_to_bc for BI and BO_COND_{TRUE,FALSE}. */
195
* extra space for the widening multiply.
38
+ bi = tcg_to_bc[cond] & (0x1f << 16);
196
*/
39
+ if (tcg_to_bc[cond] & BO(8)) {
197
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
40
+ opc = neg ? SETNBC : SETBC;
198
- FloatPartsN *c, int flags, float_status *s)
41
+ } else {
199
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
42
+ opc = neg ? SETNBCR : SETBCR;
200
+ FloatPartsN *c, int scale,
43
+ }
201
+ int flags, float_status *s)
44
+ tcg_out32(s, opc | RT(arg0) | bi);
202
{
45
+ return;
203
int ab_mask, abc_mask;
46
+ }
204
FloatPartsW p_widen, c_widen;
47
+
205
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
48
/* Handle common and trivial cases before handling anything else. */
206
a->exp = p_widen.exp;
49
if (arg2 == 0) {
207
50
switch (cond) {
208
return_normal:
209
+ /* TODO: Replace all use of float_muladd_halve_result with scale. */
210
if (flags & float_muladd_halve_result) {
211
a->exp -= 1;
212
}
213
+ a->exp += scale;
214
finish_sign:
215
if (flags & float_muladd_negate_result) {
216
a->sign ^= 1;
51
--
217
--
52
2.34.1
218
2.43.0
219
220
1
Pass a rexw parameter instead of duplicating the functions.
1
Use the scalbn interface instead of float_muladd_halve_result.
2
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/i386/tcg-target.c.inc | 28 +++++++---------------------
6
target/arm/tcg/helper-a64.c | 6 +++---
8
1 file changed, 7 insertions(+), 21 deletions(-)
7
1 file changed, 3 insertions(+), 3 deletions(-)
9
8
10
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
9
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/i386/tcg-target.c.inc
11
--- a/target/arm/tcg/helper-a64.c
13
+++ b/tcg/i386/tcg-target.c.inc
12
+++ b/target/arm/tcg/helper-a64.c
14
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmov(TCGContext *s, TCGCond cond, int rexw,
13
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst)
14
(float16_is_infinity(b) && float16_is_zero(a))) {
15
return float16_one_point_five;
15
}
16
}
17
- return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
18
+ return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);
16
}
19
}
17
20
18
-static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGReg dest,
21
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
19
- TCGReg c1, TCGArg c2, int const_c2,
22
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
20
- TCGReg v1)
23
(float32_is_infinity(b) && float32_is_zero(a))) {
21
+static void tcg_out_movcond(TCGContext *s, int rexw, TCGCond cond,
24
return float32_one_point_five;
22
+ TCGReg dest, TCGReg c1, TCGArg c2, int const_c2,
25
}
23
+ TCGReg v1)
26
- return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
24
{
27
+ return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst);
25
- tcg_out_cmp(s, c1, c2, const_c2, 0);
26
- tcg_out_cmov(s, cond, 0, dest, v1);
27
+ tcg_out_cmp(s, c1, c2, const_c2, rexw);
28
+ tcg_out_cmov(s, cond, rexw, dest, v1);
29
}
28
}
30
29
31
-#if TCG_TARGET_REG_BITS == 64
30
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
32
-static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGReg dest,
31
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
33
- TCGReg c1, TCGArg c2, int const_c2,
32
(float64_is_infinity(b) && float64_is_zero(a))) {
34
- TCGReg v1)
33
return float64_one_point_five;
35
-{
34
}
36
- tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
35
- return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
37
- tcg_out_cmov(s, cond, P_REXW, dest, v1);
36
+ return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
38
-}
37
}
39
-#endif
38
40
-
39
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
41
static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
42
TCGArg arg2, bool const_a2)
43
{
44
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
45
OP_32_64(setcond):
46
tcg_out_setcond(s, rexw, args[3], a0, a1, a2, const_a2);
47
break;
48
- case INDEX_op_movcond_i32:
49
- tcg_out_movcond32(s, args[5], a0, a1, a2, const_a2, args[3]);
50
+ OP_32_64(movcond):
51
+ tcg_out_movcond(s, rexw, args[5], a0, a1, a2, const_a2, args[3]);
52
break;
53
54
OP_32_64(bswap16):
55
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
56
}
57
break;
58
59
- case INDEX_op_movcond_i64:
60
- tcg_out_movcond64(s, args[5], a0, a1, a2, const_a2, args[3]);
61
- break;
62
-
63
case INDEX_op_bswap64_i64:
64
tcg_out_bswap64(s, a0);
65
break;
66
--
40
--
67
2.34.1
41
2.43.0
68
42
69
43
1
The setcond + neg + or sequence is a complex method of
1
Use the scalbn interface instead of float_muladd_halve_result.
2
performing a conditional move.
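As a sketch of the simplification applied in gen_edge() below (tmp stands in for the temporary the original code reuses):

    /* before: dst starts as lo1, then dst &= (s1 == s2 ? -1 : lo2) */
    tcg_gen_setcond_tl(TCG_COND_EQ, tmp, s1, s2);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_or_tl(lo2, lo2, tmp);
    tcg_gen_and_tl(dst, dst, lo2);

    /* after: express the same selection as a single conditional move */
    tcg_gen_and_tl(lo2, lo2, lo1);
    tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);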
2
3
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
target/sparc/translate.c | 17 ++++-------------
6
target/sparc/helper.h | 4 +-
8
1 file changed, 4 insertions(+), 13 deletions(-)
7
target/sparc/fop_helper.c | 8 ++--
9
8
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
9
3 files changed, 54 insertions(+), 38 deletions(-)
10
11
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/sparc/helper.h
14
+++ b/target/sparc/helper.h
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
16
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
17
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
18
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
19
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
20
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
21
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
22
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
23
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
25
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
26
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
27
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
28
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
29
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
30
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
31
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
32
33
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/sparc/fop_helper.c
36
+++ b/target/sparc/fop_helper.c
37
@@ -XXX,XX +XXX,XX @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
38
}
39
40
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
41
- float32 s2, float32 s3, uint32_t op)
42
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
43
{
44
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
45
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
46
check_ieee_exceptions(env, GETPC());
47
return ret;
48
}
49
50
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
51
- float64 s2, float64 s3, uint32_t op)
52
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
53
{
54
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
55
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
56
check_ieee_exceptions(env, GETPC());
57
return ret;
58
}
10
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
59
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
11
index XXXXXXX..XXXXXXX 100644
60
index XXXXXXX..XXXXXXX 100644
12
--- a/target/sparc/translate.c
61
--- a/target/sparc/translate.c
13
+++ b/target/sparc/translate.c
62
+++ b/target/sparc/translate.c
14
@@ -XXX,XX +XXX,XX @@ static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
63
@@ -XXX,XX +XXX,XX @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
15
64
16
tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
65
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
17
tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
66
{
18
- tcg_gen_andi_tl(dst, lo1, omask);
67
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
19
+ tcg_gen_andi_tl(lo1, lo1, omask);
68
+ TCGv_i32 z = tcg_constant_i32(0);
20
tcg_gen_andi_tl(lo2, lo2, omask);
69
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
21
70
}
22
amask = -8;
71
23
@@ -XXX,XX +XXX,XX @@ static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
72
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
24
tcg_gen_andi_tl(s1, s1, amask);
73
{
25
tcg_gen_andi_tl(s2, s2, amask);
74
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
26
75
+ TCGv_i32 z = tcg_constant_i32(0);
27
- /* We want to compute
76
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
28
- dst = (s1 == s2 ? lo1 : lo1 & lo2).
77
}
29
- We've already done dst = lo1, so this reduces to
78
30
- dst &= (s1 == s2 ? -1 : lo2)
79
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
31
- Which we perform by
80
{
32
- lo2 |= -(s1 == s2)
81
- int op = float_muladd_negate_c;
33
- dst &= lo2
82
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
34
- */
83
+ TCGv_i32 z = tcg_constant_i32(0);
35
- tcg_gen_setcond_tl(TCG_COND_EQ, lo1, s1, s2);
84
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
36
- tcg_gen_neg_tl(lo1, lo1);
85
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
37
- tcg_gen_or_tl(lo2, lo2, lo1);
86
}
38
- tcg_gen_and_tl(dst, dst, lo2);
87
39
+ /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
88
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
40
+ tcg_gen_and_tl(lo2, lo2, lo1);
89
{
41
+ tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
90
- int op = float_muladd_negate_c;
42
}
91
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
43
92
+ TCGv_i32 z = tcg_constant_i32(0);
44
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
93
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
94
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
95
}
96
97
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
98
{
99
- int op = float_muladd_negate_c | float_muladd_negate_result;
100
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
101
+ TCGv_i32 z = tcg_constant_i32(0);
102
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
103
+ float_muladd_negate_result);
104
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
105
}
106
107
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
108
{
109
- int op = float_muladd_negate_c | float_muladd_negate_result;
110
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
111
+ TCGv_i32 z = tcg_constant_i32(0);
112
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
113
+ float_muladd_negate_result);
114
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
115
}
116
117
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
118
{
119
- int op = float_muladd_negate_result;
120
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
121
+ TCGv_i32 z = tcg_constant_i32(0);
122
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
123
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
124
}
125
126
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
127
{
128
- int op = float_muladd_negate_result;
129
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
130
+ TCGv_i32 z = tcg_constant_i32(0);
131
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
132
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
133
}
134
135
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
136
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
137
{
138
- TCGv_i32 one = tcg_constant_i32(float32_one);
139
- int op = float_muladd_halve_result;
140
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
141
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
142
+ TCGv_i32 mone = tcg_constant_i32(-1);
143
+ TCGv_i32 op = tcg_constant_i32(0);
144
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
145
}
146
147
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
148
{
149
- TCGv_i64 one = tcg_constant_i64(float64_one);
150
- int op = float_muladd_halve_result;
151
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
152
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
153
+ TCGv_i32 mone = tcg_constant_i32(-1);
154
+ TCGv_i32 op = tcg_constant_i32(0);
155
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
156
}
157
158
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
159
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
160
{
161
- TCGv_i32 one = tcg_constant_i32(float32_one);
162
- int op = float_muladd_negate_c | float_muladd_halve_result;
163
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
164
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
165
+ TCGv_i32 mone = tcg_constant_i32(-1);
166
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
167
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
168
}
169
170
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
171
{
172
- TCGv_i64 one = tcg_constant_i64(float64_one);
173
- int op = float_muladd_negate_c | float_muladd_halve_result;
174
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
175
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
176
+ TCGv_i32 mone = tcg_constant_i32(-1);
177
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
178
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
179
}
180
181
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
182
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
183
{
184
- TCGv_i32 one = tcg_constant_i32(float32_one);
185
- int op = float_muladd_negate_result | float_muladd_halve_result;
186
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
187
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
188
+ TCGv_i32 mone = tcg_constant_i32(-1);
189
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
190
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
191
}
192
193
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
194
{
195
- TCGv_i64 one = tcg_constant_i64(float64_one);
196
- int op = float_muladd_negate_result | float_muladd_halve_result;
197
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
198
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
199
+ TCGv_i32 mone = tcg_constant_i32(-1);
200
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
201
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
202
}
203
204
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
45
--
205
--
46
2.34.1
206
2.43.0
207
208
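A note for readers following the conversion: the fhadd/fhsub/fnhadd helpers above no longer request float_muladd_halve_result; the halving is now expressed as a scale argument of -1. Assuming the sparc helper simply forwards its extra arguments to the float32_muladd_scalbn(a, b, c, scale, flags, status) entry point added earlier in this series (variable names below are illustrative, not from the patch), the generated operations amount to:

    /* fhadds: (1.0 * s1 + s2) * 2^-1, with a single rounding step */
    r = float32_muladd_scalbn(float32_one, s1, s2, -1, 0, &st);

    /* fhsubs: (1.0 * s1 - s2) * 2^-1, via float_muladd_negate_c */
    r = float32_muladd_scalbn(float32_one, s1, s2, -1,
                              float_muladd_negate_c, &st);
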
1
Replace the separate defines with TCG_TARGET_HAS_extr_i64_i32,
1
All uses have been converted to float*_muladd_scalbn.
2
so that the two parts of backend-specific type changing cannot
3
be out of sync.
4
2
5
Reported-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-id: <20230822175127.1173698-1-richard.henderson@linaro.org>
9
---
5
---
10
include/tcg/tcg-opc.h | 4 ++--
6
include/fpu/softfloat.h | 3 ---
11
include/tcg/tcg.h | 3 +--
7
fpu/softfloat.c | 6 ------
12
tcg/aarch64/tcg-target.h | 3 +--
8
fpu/softfloat-parts.c.inc | 4 ----
13
tcg/i386/tcg-target.h | 3 +--
9
3 files changed, 13 deletions(-)
14
tcg/loongarch64/tcg-target.h | 3 +--
15
tcg/mips/tcg-target.h | 3 +--
16
tcg/ppc/tcg-target.h | 3 +--
17
tcg/riscv/tcg-target.h | 3 +--
18
tcg/s390x/tcg-target.h | 3 +--
19
tcg/sparc64/tcg-target.h | 3 +--
20
tcg/tci/tcg-target.h | 3 +--
21
tcg/tcg-op.c | 4 ++--
22
tcg/tcg.c | 3 +--
23
13 files changed, 15 insertions(+), 26 deletions(-)
24
10
25
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
26
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
27
--- a/include/tcg/tcg-opc.h
13
--- a/include/fpu/softfloat.h
28
+++ b/include/tcg/tcg-opc.h
14
+++ b/include/fpu/softfloat.h
29
@@ -XXX,XX +XXX,XX @@ DEF(extract2_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_extract2_i64))
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
30
DEF(ext_i32_i64, 1, 1, 0, IMPL64)
16
| Using these differs from negating an input or output before calling
31
DEF(extu_i32_i64, 1, 1, 0, IMPL64)
17
| the muladd function in that this means that a NaN doesn't have its
32
DEF(extrl_i64_i32, 1, 1, 0,
18
| sign bit inverted before it is propagated.
33
- IMPL(TCG_TARGET_HAS_extrl_i64_i32)
19
-| We also support halving the result before rounding, as a special
34
+ IMPL(TCG_TARGET_HAS_extr_i64_i32)
20
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
35
| (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
21
*----------------------------------------------------------------------------*/
36
DEF(extrh_i64_i32, 1, 1, 0,
22
enum {
37
- IMPL(TCG_TARGET_HAS_extrh_i64_i32)
23
float_muladd_negate_c = 1,
38
+ IMPL(TCG_TARGET_HAS_extr_i64_i32)
24
float_muladd_negate_product = 2,
39
| (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
25
float_muladd_negate_result = 4,
40
26
- float_muladd_halve_result = 8,
41
DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL64)
27
};
42
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
28
29
/*----------------------------------------------------------------------------
30
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
43
index XXXXXXX..XXXXXXX 100644
31
index XXXXXXX..XXXXXXX 100644
44
--- a/include/tcg/tcg.h
32
--- a/fpu/softfloat.c
45
+++ b/include/tcg/tcg.h
33
+++ b/fpu/softfloat.c
46
@@ -XXX,XX +XXX,XX @@ typedef uint64_t TCGRegSet;
34
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
47
35
if (unlikely(!can_use_fpu(s))) {
48
#if TCG_TARGET_REG_BITS == 32
36
goto soft;
49
/* Turn some undef macros into false macros. */
37
}
50
-#define TCG_TARGET_HAS_extrl_i64_i32 0
38
- if (unlikely(flags & float_muladd_halve_result)) {
51
-#define TCG_TARGET_HAS_extrh_i64_i32 0
39
- goto soft;
52
+#define TCG_TARGET_HAS_extr_i64_i32 0
40
- }
53
#define TCG_TARGET_HAS_div_i64 0
41
54
#define TCG_TARGET_HAS_rem_i64 0
42
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
55
#define TCG_TARGET_HAS_div2_i64 0
43
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
56
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
44
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
45
if (unlikely(!can_use_fpu(s))) {
46
goto soft;
47
}
48
- if (unlikely(flags & float_muladd_halve_result)) {
49
- goto soft;
50
- }
51
52
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
53
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
54
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
57
index XXXXXXX..XXXXXXX 100644
55
index XXXXXXX..XXXXXXX 100644
58
--- a/tcg/aarch64/tcg-target.h
56
--- a/fpu/softfloat-parts.c.inc
59
+++ b/tcg/aarch64/tcg-target.h
57
+++ b/fpu/softfloat-parts.c.inc
60
@@ -XXX,XX +XXX,XX @@ typedef enum {
58
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
61
#define TCG_TARGET_HAS_muls2_i32 0
59
a->exp = p_widen.exp;
62
#define TCG_TARGET_HAS_muluh_i32 0
60
63
#define TCG_TARGET_HAS_mulsh_i32 0
61
return_normal:
64
-#define TCG_TARGET_HAS_extrl_i64_i32 0
62
- /* TODO: Replace all use of float_muladd_halve_result with scale. */
65
-#define TCG_TARGET_HAS_extrh_i64_i32 0
63
- if (flags & float_muladd_halve_result) {
66
+#define TCG_TARGET_HAS_extr_i64_i32 0
64
- a->exp -= 1;
67
#define TCG_TARGET_HAS_qemu_st8_i32 0
65
- }
68
66
a->exp += scale;
69
#define TCG_TARGET_HAS_div_i64 1
67
finish_sign:
70
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
68
if (flags & float_muladd_negate_result) {
71
index XXXXXXX..XXXXXXX 100644
72
--- a/tcg/i386/tcg-target.h
73
+++ b/tcg/i386/tcg-target.h
74
@@ -XXX,XX +XXX,XX @@ typedef enum {
75
76
#if TCG_TARGET_REG_BITS == 64
77
/* Keep 32-bit values zero-extended in a register. */
78
-#define TCG_TARGET_HAS_extrl_i64_i32 1
79
-#define TCG_TARGET_HAS_extrh_i64_i32 1
80
+#define TCG_TARGET_HAS_extr_i64_i32 1
81
#define TCG_TARGET_HAS_div2_i64 1
82
#define TCG_TARGET_HAS_rot_i64 1
83
#define TCG_TARGET_HAS_ext8s_i64 1
84
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
85
index XXXXXXX..XXXXXXX 100644
86
--- a/tcg/loongarch64/tcg-target.h
87
+++ b/tcg/loongarch64/tcg-target.h
88
@@ -XXX,XX +XXX,XX @@ typedef enum {
89
#define TCG_TARGET_HAS_extract_i64 1
90
#define TCG_TARGET_HAS_sextract_i64 0
91
#define TCG_TARGET_HAS_extract2_i64 0
92
-#define TCG_TARGET_HAS_extrl_i64_i32 1
93
-#define TCG_TARGET_HAS_extrh_i64_i32 1
94
+#define TCG_TARGET_HAS_extr_i64_i32 1
95
#define TCG_TARGET_HAS_ext8s_i64 1
96
#define TCG_TARGET_HAS_ext16s_i64 1
97
#define TCG_TARGET_HAS_ext32s_i64 1
98
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
99
index XXXXXXX..XXXXXXX 100644
100
--- a/tcg/mips/tcg-target.h
101
+++ b/tcg/mips/tcg-target.h
102
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
103
#if TCG_TARGET_REG_BITS == 64
104
#define TCG_TARGET_HAS_add2_i32 0
105
#define TCG_TARGET_HAS_sub2_i32 0
106
-#define TCG_TARGET_HAS_extrl_i64_i32 1
107
-#define TCG_TARGET_HAS_extrh_i64_i32 1
108
+#define TCG_TARGET_HAS_extr_i64_i32 1
109
#define TCG_TARGET_HAS_div_i64 1
110
#define TCG_TARGET_HAS_rem_i64 1
111
#define TCG_TARGET_HAS_not_i64 1
112
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
113
index XXXXXXX..XXXXXXX 100644
114
--- a/tcg/ppc/tcg-target.h
115
+++ b/tcg/ppc/tcg-target.h
116
@@ -XXX,XX +XXX,XX @@ typedef enum {
117
#if TCG_TARGET_REG_BITS == 64
118
#define TCG_TARGET_HAS_add2_i32 0
119
#define TCG_TARGET_HAS_sub2_i32 0
120
-#define TCG_TARGET_HAS_extrl_i64_i32 0
121
-#define TCG_TARGET_HAS_extrh_i64_i32 0
122
+#define TCG_TARGET_HAS_extr_i64_i32 0
123
#define TCG_TARGET_HAS_div_i64 1
124
#define TCG_TARGET_HAS_rem_i64 have_isa_3_00
125
#define TCG_TARGET_HAS_rot_i64 1
126
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
127
index XXXXXXX..XXXXXXX 100644
128
--- a/tcg/riscv/tcg-target.h
129
+++ b/tcg/riscv/tcg-target.h
130
@@ -XXX,XX +XXX,XX @@ extern bool have_zbb;
131
#define TCG_TARGET_HAS_extract_i64 0
132
#define TCG_TARGET_HAS_sextract_i64 0
133
#define TCG_TARGET_HAS_extract2_i64 0
134
-#define TCG_TARGET_HAS_extrl_i64_i32 1
135
-#define TCG_TARGET_HAS_extrh_i64_i32 1
136
+#define TCG_TARGET_HAS_extr_i64_i32 1
137
#define TCG_TARGET_HAS_ext8s_i64 1
138
#define TCG_TARGET_HAS_ext16s_i64 1
139
#define TCG_TARGET_HAS_ext32s_i64 1
140
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
141
index XXXXXXX..XXXXXXX 100644
142
--- a/tcg/s390x/tcg-target.h
143
+++ b/tcg/s390x/tcg-target.h
144
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
145
#define TCG_TARGET_HAS_muls2_i32 0
146
#define TCG_TARGET_HAS_muluh_i32 0
147
#define TCG_TARGET_HAS_mulsh_i32 0
148
-#define TCG_TARGET_HAS_extrl_i64_i32 0
149
-#define TCG_TARGET_HAS_extrh_i64_i32 0
150
+#define TCG_TARGET_HAS_extr_i64_i32 0
151
#define TCG_TARGET_HAS_qemu_st8_i32 0
152
153
#define TCG_TARGET_HAS_div2_i64 1
154
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
155
index XXXXXXX..XXXXXXX 100644
156
--- a/tcg/sparc64/tcg-target.h
157
+++ b/tcg/sparc64/tcg-target.h
158
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
159
#define TCG_TARGET_HAS_mulsh_i32 0
160
#define TCG_TARGET_HAS_qemu_st8_i32 0
161
162
-#define TCG_TARGET_HAS_extrl_i64_i32 1
163
-#define TCG_TARGET_HAS_extrh_i64_i32 1
164
+#define TCG_TARGET_HAS_extr_i64_i32 1
165
#define TCG_TARGET_HAS_div_i64 1
166
#define TCG_TARGET_HAS_rem_i64 0
167
#define TCG_TARGET_HAS_rot_i64 0
168
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
169
index XXXXXXX..XXXXXXX 100644
170
--- a/tcg/tci/tcg-target.h
171
+++ b/tcg/tci/tcg-target.h
172
@@ -XXX,XX +XXX,XX @@
173
#define TCG_TARGET_HAS_qemu_st8_i32 0
174
175
#if TCG_TARGET_REG_BITS == 64
176
-#define TCG_TARGET_HAS_extrl_i64_i32 0
177
-#define TCG_TARGET_HAS_extrh_i64_i32 0
178
+#define TCG_TARGET_HAS_extr_i64_i32 0
179
#define TCG_TARGET_HAS_bswap16_i64 1
180
#define TCG_TARGET_HAS_bswap32_i64 1
181
#define TCG_TARGET_HAS_bswap64_i64 1
182
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
183
index XXXXXXX..XXXXXXX 100644
184
--- a/tcg/tcg-op.c
185
+++ b/tcg/tcg-op.c
186
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
187
{
188
if (TCG_TARGET_REG_BITS == 32) {
189
tcg_gen_mov_i32(ret, TCGV_LOW(arg));
190
- } else if (TCG_TARGET_HAS_extrl_i64_i32) {
191
+ } else if (TCG_TARGET_HAS_extr_i64_i32) {
192
tcg_gen_op2(INDEX_op_extrl_i64_i32,
193
tcgv_i32_arg(ret), tcgv_i64_arg(arg));
194
} else {
195
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
196
{
197
if (TCG_TARGET_REG_BITS == 32) {
198
tcg_gen_mov_i32(ret, TCGV_HIGH(arg));
199
- } else if (TCG_TARGET_HAS_extrh_i64_i32) {
200
+ } else if (TCG_TARGET_HAS_extr_i64_i32) {
201
tcg_gen_op2(INDEX_op_extrh_i64_i32,
202
tcgv_i32_arg(ret), tcgv_i64_arg(arg));
203
} else {
204
diff --git a/tcg/tcg.c b/tcg/tcg.c
205
index XXXXXXX..XXXXXXX 100644
206
--- a/tcg/tcg.c
207
+++ b/tcg/tcg.c
208
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op)
209
case INDEX_op_extract2_i64:
210
return TCG_TARGET_HAS_extract2_i64;
211
case INDEX_op_extrl_i64_i32:
212
- return TCG_TARGET_HAS_extrl_i64_i32;
213
case INDEX_op_extrh_i64_i32:
214
- return TCG_TARGET_HAS_extrh_i64_i32;
215
+ return TCG_TARGET_HAS_extr_i64_i32;
216
case INDEX_op_ext8s_i64:
217
return TCG_TARGET_HAS_ext8s_i64;
218
case INDEX_op_ext16s_i64:
219
--
69
--
220
2.34.1
70
2.43.0
221
71
222
72
1
The setcond + neg + and sequence is a complex method of
1
This rounding mode is used by Hexagon.
2
performing a conditional move.
3
2
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
4
---
7
target/alpha/translate.c | 7 +++----
5
include/fpu/softfloat-types.h | 2 ++
8
1 file changed, 3 insertions(+), 4 deletions(-)
6
fpu/softfloat-parts.c.inc | 3 +++
7
2 files changed, 5 insertions(+)
9
8
10
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
9
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/target/alpha/translate.c
11
--- a/include/fpu/softfloat-types.h
13
+++ b/target/alpha/translate.c
12
+++ b/include/fpu/softfloat-types.h
14
@@ -XXX,XX +XXX,XX @@ static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
13
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
15
14
float_round_to_odd = 5,
16
case TCG_COND_GE:
15
/* Not an IEEE rounding mode: round to closest odd, overflow to inf */
17
case TCG_COND_LT:
16
float_round_to_odd_inf = 6,
18
- /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
17
+ /* Not an IEEE rounding mode: round to nearest even, overflow to max */
19
- tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
18
+ float_round_nearest_even_max = 7,
20
- tcg_gen_neg_i64(dest, dest);
19
} FloatRoundMode;
21
- tcg_gen_and_i64(dest, dest, src);
20
22
+ /* For >= or <, map -0.0 to +0.0. */
21
/*
23
+ tcg_gen_movcond_i64(TCG_COND_NE, dest, src, tcg_constant_i64(mzero),
22
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
24
+ src, tcg_constant_i64(0));
23
index XXXXXXX..XXXXXXX 100644
25
break;
24
--- a/fpu/softfloat-parts.c.inc
26
25
+++ b/fpu/softfloat-parts.c.inc
27
default:
26
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
27
int exp, flags = 0;
28
29
switch (s->float_rounding_mode) {
30
+ case float_round_nearest_even_max:
31
+ overflow_norm = true;
32
+ /* fall through */
33
case float_round_nearest_even:
34
if (N > 64 && frac_lsb == 0) {
35
inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
28
--
36
--
29
2.34.1
37
2.43.0
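The new rounding mode behaves like float_round_nearest_even except on overflow, where it saturates to the largest finite number instead of producing infinity. A hypothetical use, restricted to documented softfloat calls (the values are only for illustration):

    float_status st = { 0 };
    set_float_rounding_mode(float_round_nearest_even_max, &st);

    /* 0x7f7fffff is FLT_MAX; doubling it overflows under plain
       nearest-even rounding and would return +inf, but in this
       mode the result saturates back to FLT_MAX. */
    float32 r = float32_add(make_float32(0x7f7fffff),
                            make_float32(0x7f7fffff), &st);
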
1
From: Anton Johansson via <qemu-devel@nongnu.org>
1
Certain Hexagon instructions suppress changes to the result
2
when the product of fma() is a true zero.
2
3
3
Widens the pc and saved_insn fields of kvm_sw_breakpoint from
4
target_ulong to vaddr. The pc argument of kvm_find_sw_breakpoint is also
5
widened to match.
6
7
Signed-off-by: Anton Johansson <anjo@rev.ng>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-Id: <20230807155706.9580-2-anjo@rev.ng>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
5
---
12
include/sysemu/kvm.h | 6 +++---
6
include/fpu/softfloat.h | 5 +++++
13
accel/kvm/kvm-all.c | 3 +--
7
fpu/softfloat.c | 3 +++
14
2 files changed, 4 insertions(+), 5 deletions(-)
8
fpu/softfloat-parts.c.inc | 4 +++-
9
3 files changed, 11 insertions(+), 1 deletion(-)
15
10
16
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
17
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
18
--- a/include/sysemu/kvm.h
13
--- a/include/fpu/softfloat.h
19
+++ b/include/sysemu/kvm.h
14
+++ b/include/fpu/softfloat.h
20
@@ -XXX,XX +XXX,XX @@ struct kvm_guest_debug;
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
21
struct kvm_debug_exit_arch;
16
| Using these differs from negating an input or output before calling
22
17
| the muladd function in that this means that a NaN doesn't have its
23
struct kvm_sw_breakpoint {
18
| sign bit inverted before it is propagated.
24
- target_ulong pc;
19
+|
25
- target_ulong saved_insn;
20
+| With float_muladd_suppress_add_product_zero, if A or B is zero
26
+ vaddr pc;
21
+| such that the product is a true zero, then return C without addition.
27
+ vaddr saved_insn;
22
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
28
int use_count;
23
*----------------------------------------------------------------------------*/
29
QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
24
enum {
25
float_muladd_negate_c = 1,
26
float_muladd_negate_product = 2,
27
float_muladd_negate_result = 4,
28
+ float_muladd_suppress_add_product_zero = 8,
30
};
29
};
31
30
32
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
31
/*----------------------------------------------------------------------------
33
- target_ulong pc);
32
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
34
+ vaddr pc);
35
36
int kvm_sw_breakpoints_active(CPUState *cpu);
37
38
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
39
index XXXXXXX..XXXXXXX 100644
33
index XXXXXXX..XXXXXXX 100644
40
--- a/accel/kvm/kvm-all.c
34
--- a/fpu/softfloat.c
41
+++ b/accel/kvm/kvm-all.c
35
+++ b/fpu/softfloat.c
42
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_supports_user_irq(void)
36
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
43
}
37
if (unlikely(!can_use_fpu(s))) {
44
38
goto soft;
45
#ifdef KVM_CAP_SET_GUEST_DEBUG
39
}
46
-struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
40
+ if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
47
- target_ulong pc)
41
+ goto soft;
48
+struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc)
42
+ }
49
{
43
50
struct kvm_sw_breakpoint *bp;
44
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
51
45
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
46
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
47
index XXXXXXX..XXXXXXX 100644
48
--- a/fpu/softfloat-parts.c.inc
49
+++ b/fpu/softfloat-parts.c.inc
50
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
51
goto return_normal;
52
}
53
if (c->cls == float_class_zero) {
54
- if (a->sign != c->sign) {
55
+ if (flags & float_muladd_suppress_add_product_zero) {
56
+ a->sign = c->sign;
57
+ } else if (a->sign != c->sign) {
58
goto return_sub_zero;
59
}
60
goto return_zero;
52
--
61
--
53
2.34.1
62
2.43.0
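The flag only matters when the product a*b is a true zero; in that case the addend is returned untouched, sign included. A small illustration, assuming only the float32_muladd() prototype shown above (st is a placeholder float_status):

    float32 c = make_float32(0x80000000);               /* -0.0 */

    /* Ordinary muladd: (+0.0 * 1.0) + (-0.0) follows the usual
       zero-sign rules and gives +0.0 under round-to-nearest. */
    float32 r1 = float32_muladd(float32_zero, float32_one, c, 0, &st);

    /* With the new flag the true-zero product leaves c alone,
       so r2 is -0.0 -- the Hexagon behaviour described above. */
    float32 r2 = float32_muladd(float32_zero, float32_one, c,
                                float_muladd_suppress_add_product_zero, &st);
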
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
There are no special cases for this instruction.
2
Reviewed-by: Bastian Koppelmann <kbastian@mail.uni-paderborn.de>
2
Remove internal_mpyf as unused.
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
6
---
5
target/tricore/translate.c | 16 ++++++----------
7
target/hexagon/fma_emu.h | 1 -
6
1 file changed, 6 insertions(+), 10 deletions(-)
8
target/hexagon/fma_emu.c | 8 --------
9
target/hexagon/op_helper.c | 2 +-
10
3 files changed, 1 insertion(+), 10 deletions(-)
7
11
8
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
12
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
9
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
10
--- a/target/tricore/translate.c
14
--- a/target/hexagon/fma_emu.h
11
+++ b/target/tricore/translate.c
15
+++ b/target/hexagon/fma_emu.h
12
@@ -XXX,XX +XXX,XX @@ gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
16
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32);
13
gen_accumulating_cond(cond, ret, r1, temp, op);
17
float32 infinite_float32(uint8_t sign);
18
float32 internal_fmafx(float32 a, float32 b, float32 c,
19
int scale, float_status *fp_status);
20
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
21
float64 internal_mpyhh(float64 a, float64 b,
22
unsigned long long int accumulated,
23
float_status *fp_status);
24
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/hexagon/fma_emu.c
27
+++ b/target/hexagon/fma_emu.c
28
@@ -XXX,XX +XXX,XX @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
29
return accum_round_float32(result, fp_status);
14
}
30
}
15
31
16
-/* ret = (r1 cond r2) ? 0xFFFFFFFF ? 0x00000000;*/
32
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
17
-static inline void gen_cond_w(TCGCond cond, TCGv ret, TCGv r1, TCGv r2)
18
-{
33
-{
19
- tcg_gen_setcond_tl(cond, ret, r1, r2);
34
- if (float32_is_zero(a) || float32_is_zero(b)) {
20
- tcg_gen_neg_tl(ret, ret);
35
- return float32_mul(a, b, fp_status);
36
- }
37
- return internal_fmafx(a, b, float32_zero, 0, fp_status);
21
-}
38
-}
22
-
39
-
23
static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
40
float64 internal_mpyhh(float64 a, float64 b,
41
unsigned long long int accumulated,
42
float_status *fp_status)
43
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/hexagon/op_helper.c
46
+++ b/target/hexagon/op_helper.c
47
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
24
{
48
{
25
TCGv b0 = tcg_temp_new();
49
float32 RdV;
26
@@ -XXX,XX +XXX,XX @@ static void decode_rr_accumulator(DisasContext *ctx)
50
arch_fpop_start(env);
27
gen_helper_eq_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
51
- RdV = internal_mpyf(RsV, RtV, &env->fp_status);
28
break;
52
+ RdV = float32_mul(RsV, RtV, &env->fp_status);
29
case OPC2_32_RR_EQ_W:
53
arch_fpop_end(env);
30
- gen_cond_w(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
54
return RdV;
31
+ tcg_gen_negsetcond_tl(TCG_COND_EQ, cpu_gpr_d[r3],
55
}
32
+ cpu_gpr_d[r1], cpu_gpr_d[r2]);
33
break;
34
case OPC2_32_RR_EQANY_B:
35
gen_helper_eqany_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
36
@@ -XXX,XX +XXX,XX @@ static void decode_rr_accumulator(DisasContext *ctx)
37
gen_helper_lt_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
38
break;
39
case OPC2_32_RR_LT_W:
40
- gen_cond_w(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
41
+ tcg_gen_negsetcond_tl(TCG_COND_LT, cpu_gpr_d[r3],
42
+ cpu_gpr_d[r1], cpu_gpr_d[r2]);
43
break;
44
case OPC2_32_RR_LT_WU:
45
- gen_cond_w(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
46
+ tcg_gen_negsetcond_tl(TCG_COND_LTU, cpu_gpr_d[r3],
47
+ cpu_gpr_d[r1], cpu_gpr_d[r2]);
48
break;
49
case OPC2_32_RR_MAX:
50
tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r3], cpu_gpr_d[r1],
51
--
56
--
52
2.34.1
57
2.43.0
53
54
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
There are no special cases for this instruction.
2
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
---
5
tcg/tcg-op-gvec.c | 6 ++----
6
target/hexagon/op_helper.c | 2 +-
6
tcg/tcg-op.c | 6 ++----
7
1 file changed, 1 insertion(+), 1 deletion(-)
7
2 files changed, 4 insertions(+), 8 deletions(-)
8
8
9
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
9
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
10
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/tcg-op-gvec.c
11
--- a/target/hexagon/op_helper.c
12
+++ b/tcg/tcg-op-gvec.c
12
+++ b/target/hexagon/op_helper.c
13
@@ -XXX,XX +XXX,XX @@ static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
13
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
14
for (i = 0; i < oprsz; i += 4) {
14
float32 RsV, float32 RtV)
15
tcg_gen_ld_i32(t0, cpu_env, aofs + i);
15
{
16
tcg_gen_ld_i32(t1, cpu_env, bofs + i);
16
arch_fpop_start(env);
17
- tcg_gen_setcond_i32(cond, t0, t0, t1);
17
- RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
18
- tcg_gen_neg_i32(t0, t0);
18
+ RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
19
+ tcg_gen_negsetcond_i32(cond, t0, t0, t1);
19
arch_fpop_end(env);
20
tcg_gen_st_i32(t0, cpu_env, dofs + i);
20
return RxV;
21
}
21
}
22
tcg_temp_free_i32(t1);
23
@@ -XXX,XX +XXX,XX @@ static void expand_cmp_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
24
for (i = 0; i < oprsz; i += 8) {
25
tcg_gen_ld_i64(t0, cpu_env, aofs + i);
26
tcg_gen_ld_i64(t1, cpu_env, bofs + i);
27
- tcg_gen_setcond_i64(cond, t0, t0, t1);
28
- tcg_gen_neg_i64(t0, t0);
29
+ tcg_gen_negsetcond_i64(cond, t0, t0, t1);
30
tcg_gen_st_i64(t0, cpu_env, dofs + i);
31
}
32
tcg_temp_free_i64(t1);
33
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/tcg/tcg-op.c
36
+++ b/tcg/tcg-op.c
37
@@ -XXX,XX +XXX,XX @@ void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
38
} else {
39
TCGv_i32 t0 = tcg_temp_ebb_new_i32();
40
TCGv_i32 t1 = tcg_temp_ebb_new_i32();
41
- tcg_gen_setcond_i32(cond, t0, c1, c2);
42
- tcg_gen_neg_i32(t0, t0);
43
+ tcg_gen_negsetcond_i32(cond, t0, c1, c2);
44
tcg_gen_and_i32(t1, v1, t0);
45
tcg_gen_andc_i32(ret, v2, t0);
46
tcg_gen_or_i32(ret, ret, t1);
47
@@ -XXX,XX +XXX,XX @@ void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
48
} else {
49
TCGv_i64 t0 = tcg_temp_ebb_new_i64();
50
TCGv_i64 t1 = tcg_temp_ebb_new_i64();
51
- tcg_gen_setcond_i64(cond, t0, c1, c2);
52
- tcg_gen_neg_i64(t0, t0);
53
+ tcg_gen_negsetcond_i64(cond, t0, c1, c2);
54
tcg_gen_and_i64(t1, v1, t0);
55
tcg_gen_andc_i64(ret, v2, t0);
56
tcg_gen_or_i64(ret, ret, t1);
57
--
22
--
58
2.34.1
23
2.43.0
59
60
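For reference, negsetcond computes dest = -(a cond b), i.e. 0 or all-ones, which is why each setcond+neg pair above collapses into a single op. A minimal sketch of what a fallback expansion looks like when the backend lacks the opcode (illustrative only, not copied from tcg-op.c):

    static void gen_negsetcond_fallback(TCGCond cond, TCGv_i32 dest,
                                        TCGv_i32 a, TCGv_i32 b)
    {
        tcg_gen_setcond_i32(cond, dest, a, b);   /* dest = 0 or 1  */
        tcg_gen_neg_i32(dest, dest);             /* dest = 0 or -1 */
    }
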
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
1
There are no special cases for this instruction. Since Hexagon
2
always uses default-nan mode, explicitly negating the first
3
input is unnecessary. Use float_muladd_negate_product instead.
2
4
3
Commit 609ad70562 ("tcg: Split trunc_shr_i32 opcode into
5
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
extr[lh]_i64_i32") remove trunc_shr_i64_i32(). Update the
5
backend documentation.
6
7
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-Id: <20230822162847.71206-1-philmd@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
7
---
12
docs/devel/tcg-ops.rst | 7 ++++---
8
target/hexagon/op_helper.c | 5 ++---
13
1 file changed, 4 insertions(+), 3 deletions(-)
9
1 file changed, 2 insertions(+), 3 deletions(-)
14
10
15
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
11
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
16
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
17
--- a/docs/devel/tcg-ops.rst
13
--- a/target/hexagon/op_helper.c
18
+++ b/docs/devel/tcg-ops.rst
14
+++ b/target/hexagon/op_helper.c
19
@@ -XXX,XX +XXX,XX @@ sub2_i32, brcond2_i32).
15
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
20
On a 64 bit target, the values are transferred between 32 and 64-bit
16
float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
21
registers using the following ops:
17
float32 RsV, float32 RtV)
22
18
{
23
-- trunc_shr_i64_i32
19
- float32 neg_RsV;
24
+- extrl_i64_i32
20
arch_fpop_start(env);
25
+- extrh_i64_i32
21
- neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
26
- ext_i32_i64
22
- RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
27
- extu_i32_i64
23
+ RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
28
24
+ &env->fp_status);
29
They ensure that the values are correctly truncated or extended when
25
arch_fpop_end(env);
30
moved from a 32-bit to a 64-bit register or vice-versa. Note that the
26
return RxV;
31
-trunc_shr_i64_i32 is an optional op. It is not necessary to implement
27
}
32
-it if all the following conditions are met:
33
+extrl_i64_i32 and extrh_i64_i32 are optional ops. It is not necessary
34
+to implement them if all the following conditions are met:
35
36
- 64-bit registers can hold 32-bit values
37
- 32-bit values in a 64-bit register do not need to stay zero or
38
--
28
--
39
2.34.1
29
2.43.0
40
41
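The reason for preferring float_muladd_negate_product over flipping the sign of RsV by hand is NaN propagation: the flag negates the product without rewriting the operand, so a NaN input keeps its sign bit, and with Hexagon's default-NaN mode even that difference disappears. Roughly, using only the softfloat calls already visible in this patch (names illustrative):

    /* new form: -(a * b) + c, fused and rounded once */
    float32 r  = float32_muladd(a, b, c, float_muladd_negate_product, &st);

    /* the removed code did, in effect: */
    float32 na = float32_set_sign(a, !float32_is_neg(a));
    float32 r2 = float32_muladd(na, b, c, 0, &st);
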
1
We can use MOVB and MOVW with an immediate just as easily
1
This instruction has a special case that 0 * x + c returns c
2
as with a register input.
2
without the normal sign folding that comes with 0 + -0.
3
Use the new float_muladd_suppress_add_product_zero to
4
describe this.
3
5
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
8
---
7
tcg/i386/tcg-target-con-set.h | 2 +-
9
target/hexagon/op_helper.c | 11 +++--------
8
tcg/i386/tcg-target.c.inc | 26 ++++++++++++++++++++++----
10
1 file changed, 3 insertions(+), 8 deletions(-)
9
2 files changed, 23 insertions(+), 5 deletions(-)
10
11
11
diff --git a/tcg/i386/tcg-target-con-set.h b/tcg/i386/tcg-target-con-set.h
12
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/i386/tcg-target-con-set.h
14
--- a/target/hexagon/op_helper.c
14
+++ b/tcg/i386/tcg-target-con-set.h
15
+++ b/target/hexagon/op_helper.c
15
@@ -XXX,XX +XXX,XX @@ C_O1_I1(r, q)
16
@@ -XXX,XX +XXX,XX @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
16
C_O1_I1(r, r)
17
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
17
C_O1_I1(x, r)
18
float32 RsV, float32 RtV, float32 PuV)
18
C_O1_I1(x, x)
19
{
19
-C_O1_I2(q, 0, q)
20
- size4s_t tmp;
20
+C_O1_I2(q, 0, qi)
21
arch_fpop_start(env);
21
C_O1_I2(q, r, re)
22
- RxV = check_nan(RxV, RxV, &env->fp_status);
22
C_O1_I2(r, 0, ci)
23
- RxV = check_nan(RxV, RsV, &env->fp_status);
23
C_O1_I2(r, 0, r)
24
- RxV = check_nan(RxV, RtV, &env->fp_status);
24
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
25
- tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
25
index XXXXXXX..XXXXXXX 100644
26
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
26
--- a/tcg/i386/tcg-target.c.inc
27
- RxV = tmp;
27
+++ b/tcg/i386/tcg-target.c.inc
28
- }
28
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
29
+ RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
29
#define OPC_MOVL_GvEv    (0x8b)        /* loads, more or less */
30
+ float_muladd_suppress_add_product_zero,
30
#define OPC_MOVB_EvIz (0xc6)
31
+ &env->fp_status);
31
#define OPC_MOVL_EvIz    (0xc7)
32
arch_fpop_end(env);
32
+#define OPC_MOVB_Ib (0xb0)
33
return RxV;
33
#define OPC_MOVL_Iv (0xb8)
34
}
34
#define OPC_MOVBE_GyMy (0xf0 | P_EXT38)
35
#define OPC_MOVBE_MyGy (0xf1 | P_EXT38)
36
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
37
OP_32_64(deposit):
38
if (args[3] == 0 && args[4] == 8) {
39
/* load bits 0..7 */
40
- tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0);
41
+ if (const_a2) {
42
+ tcg_out_opc(s, OPC_MOVB_Ib | P_REXB_RM | LOWREGMASK(a0),
43
+ 0, a0, 0);
44
+ tcg_out8(s, a2);
45
+ } else {
46
+ tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0);
47
+ }
48
} else if (TCG_TARGET_REG_BITS == 32 && args[3] == 8 && args[4] == 8) {
49
/* load bits 8..15 */
50
- tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4);
51
+ if (const_a2) {
52
+ tcg_out8(s, OPC_MOVB_Ib + a0 + 4);
53
+ tcg_out8(s, a2);
54
+ } else {
55
+ tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4);
56
+ }
57
} else if (args[3] == 0 && args[4] == 16) {
58
/* load bits 0..15 */
59
- tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0);
60
+ if (const_a2) {
61
+ tcg_out_opc(s, OPC_MOVL_Iv | P_DATA16 | LOWREGMASK(a0),
62
+ 0, a0, 0);
63
+ tcg_out16(s, a2);
64
+ } else {
65
+ tcg_out_modrm(s, OPC_MOVL_EvGv | P_DATA16, a2, a0);
66
+ }
67
} else {
68
g_assert_not_reached();
69
}
70
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
71
72
case INDEX_op_deposit_i32:
73
case INDEX_op_deposit_i64:
74
- return C_O1_I2(q, 0, q);
75
+ return C_O1_I2(q, 0, qi);
76
77
case INDEX_op_setcond_i32:
78
case INDEX_op_setcond_i64:
79
--
35
--
80
2.34.1
36
2.43.0
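Usage-wise (an illustrative call, not taken from the patch): a front end that emits

    /* insert the constant 0x2a into bits 0..7 of reg */
    tcg_gen_deposit_i32(reg, reg, tcg_constant_i32(0x2a), 0, 8);

can now be encoded by the x86 backend as a single byte move-immediate (opcode 0xB0+reg, e.g. movb $0x2a, %al when reg is allocated to EAX) instead of first materializing 0x2a in a scratch register.
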
1
In the general case we simply negate. However with isel we
1
There are multiple special cases for this instruction.
2
may load -1 instead of 1 with no extra effort.
2
(1) The saturate to normal maximum instead of overflow to infinity is
3
handled by the new float_round_nearest_even_max rounding mode.
4
(2) The 0 * n + c special case is handled by the new
5
float_muladd_suppress_add_product_zero flag.
6
(3) The Inf - Inf -> 0 special case can be detected after the fact
7
by examining float_flag_invalid_isi.
3
8
4
Consolidate EQ0 and NE0 logic. Replace the NE0 zero-extension
9
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
with inversion+negation of EQ0, which is never worse and may
6
eliminate one insn. Provide a special case for -EQ0.
7
8
Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
---
11
tcg/ppc/tcg-target.h | 4 +-
12
target/hexagon/op_helper.c | 105 +++++++++----------------------------
12
tcg/ppc/tcg-target.c.inc | 127 ++++++++++++++++++++++++---------------
13
1 file changed, 26 insertions(+), 79 deletions(-)
13
2 files changed, 82 insertions(+), 49 deletions(-)
14
14
15
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
15
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
16
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
17
--- a/tcg/ppc/tcg-target.h
17
--- a/target/hexagon/op_helper.c
18
+++ b/tcg/ppc/tcg-target.h
18
+++ b/target/hexagon/op_helper.c
19
@@ -XXX,XX +XXX,XX @@ typedef enum {
19
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
20
#define TCG_TARGET_HAS_sextract_i32 0
20
return RxV;
21
#define TCG_TARGET_HAS_extract2_i32 0
22
#define TCG_TARGET_HAS_movcond_i32 1
23
-#define TCG_TARGET_HAS_negsetcond_i32 0
24
+#define TCG_TARGET_HAS_negsetcond_i32 1
25
#define TCG_TARGET_HAS_mulu2_i32 0
26
#define TCG_TARGET_HAS_muls2_i32 0
27
#define TCG_TARGET_HAS_muluh_i32 1
28
@@ -XXX,XX +XXX,XX @@ typedef enum {
29
#define TCG_TARGET_HAS_sextract_i64 0
30
#define TCG_TARGET_HAS_extract2_i64 0
31
#define TCG_TARGET_HAS_movcond_i64 1
32
-#define TCG_TARGET_HAS_negsetcond_i64 0
33
+#define TCG_TARGET_HAS_negsetcond_i64 1
34
#define TCG_TARGET_HAS_add2_i64 1
35
#define TCG_TARGET_HAS_sub2_i64 1
36
#define TCG_TARGET_HAS_mulu2_i64 0
37
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
38
index XXXXXXX..XXXXXXX 100644
39
--- a/tcg/ppc/tcg-target.c.inc
40
+++ b/tcg/ppc/tcg-target.c.inc
41
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
42
}
21
}
43
22
44
static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
23
-static bool is_zero_prod(float32 a, float32 b)
45
- TCGReg dst, TCGReg src)
24
-{
46
+ TCGReg dst, TCGReg src, bool neg)
25
- return ((float32_is_zero(a) && is_finite(b)) ||
26
- (float32_is_zero(b) && is_finite(a)));
27
-}
28
-
29
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
30
-{
31
- float32 ret = dst;
32
- if (float32_is_any_nan(x)) {
33
- if (extract32(x, 22, 1) == 0) {
34
- float_raise(float_flag_invalid, fp_status);
35
- }
36
- ret = make_float32(0xffffffff); /* nan */
37
- }
38
- return ret;
39
-}
40
-
41
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
42
float32 RsV, float32 RtV, float32 PuV)
47
{
43
{
48
+ if (neg && (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I64)) {
44
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
49
+ /*
45
return RxV;
50
+ * X != 0 implies X + -1 generates a carry.
46
}
51
+ * RT = (~X + X) + CA
47
52
+ * = -1 + CA
48
-static bool is_inf_prod(int32_t a, int32_t b)
53
+ * = CA ? 0 : -1
49
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
54
+ */
50
+ float32 RsV, float32 RtV, int negate)
55
+ tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
51
{
56
+ tcg_out32(s, SUBFE | TAB(dst, src, src));
52
- return (float32_is_infinity(a) && float32_is_infinity(b)) ||
57
+ return;
53
- (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
54
- (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
55
+ int flags;
56
+
57
+ arch_fpop_start(env);
58
+
59
+ set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
60
+ RxV = float32_muladd(RsV, RtV, RxV,
61
+ negate | float_muladd_suppress_add_product_zero,
62
+ &env->fp_status);
63
+
64
+ flags = get_float_exception_flags(&env->fp_status);
65
+ if (flags) {
66
+ /* Flags are suppressed by this instruction. */
67
+ set_float_exception_flags(0, &env->fp_status);
68
+
69
+ /* Return 0 for Inf - Inf. */
70
+ if (flags & float_flag_invalid_isi) {
71
+ RxV = 0;
72
+ }
58
+ }
73
+ }
59
+
74
+
60
if (type == TCG_TYPE_I32) {
75
+ arch_fpop_end(env);
61
tcg_out32(s, CNTLZW | RS(src) | RA(dst));
76
+ return RxV;
62
tcg_out_shri32(s, dst, dst, 5);
63
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
64
tcg_out32(s, CNTLZD | RS(src) | RA(dst));
65
tcg_out_shri64(s, dst, dst, 6);
66
}
67
+ if (neg) {
68
+ tcg_out32(s, NEG | RT(dst) | RA(dst));
69
+ }
70
}
77
}
71
78
72
-static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
79
float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
73
+static void tcg_out_setcond_ne0(TCGContext *s, TCGType type,
80
float32 RsV, float32 RtV)
74
+ TCGReg dst, TCGReg src, bool neg)
75
{
81
{
76
- /* X != 0 implies X + -1 generates a carry. Extra addition
82
- bool infinp;
77
- trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C. */
83
- bool infminusinf;
78
- if (dst != src) {
84
- float32 tmp;
79
- tcg_out32(s, ADDIC | TAI(dst, src, -1));
85
-
80
- tcg_out32(s, SUBFE | TAB(dst, dst, src));
86
- arch_fpop_start(env);
81
- } else {
87
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
82
+ if (!neg && (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I64)) {
88
- infminusinf = float32_is_infinity(RxV) &&
83
+ /*
89
- is_inf_prod(RsV, RtV) &&
84
+ * X != 0 implies X + -1 generates a carry. Extra addition
90
- (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
85
+ * trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C.
91
- infinp = float32_is_infinity(RxV) ||
86
+ */
92
- float32_is_infinity(RtV) ||
87
tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
93
- float32_is_infinity(RsV);
88
tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
94
- RxV = check_nan(RxV, RxV, &env->fp_status);
89
+ return;
95
- RxV = check_nan(RxV, RsV, &env->fp_status);
90
+ }
96
- RxV = check_nan(RxV, RtV, &env->fp_status);
91
+ tcg_out_setcond_eq0(s, type, dst, src, false);
97
- tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
92
+ if (neg) {
98
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
93
+ tcg_out32(s, ADDI | TAI(dst, dst, -1));
99
- RxV = tmp;
94
+ } else {
100
- }
95
+ tcg_out_xori32(s, dst, dst, 1);
101
- set_float_exception_flags(0, &env->fp_status);
96
}
102
- if (float32_is_infinity(RxV) && !infinp) {
103
- RxV = RxV - 1;
104
- }
105
- if (infminusinf) {
106
- RxV = 0;
107
- }
108
- arch_fpop_end(env);
109
- return RxV;
110
+ return do_sffma_lib(env, RxV, RsV, RtV, 0);
97
}
111
}
98
112
99
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
113
float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
100
114
float32 RsV, float32 RtV)
101
static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
102
TCGArg arg0, TCGArg arg1, TCGArg arg2,
103
- int const_arg2)
104
+ int const_arg2, bool neg)
105
{
115
{
106
- int crop, sh;
116
- bool infinp;
107
+ int sh;
117
- bool infminusinf;
108
+ bool inv;
118
- float32 tmp;
109
110
tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
111
112
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
113
if (arg2 == 0) {
114
switch (cond) {
115
case TCG_COND_EQ:
116
- tcg_out_setcond_eq0(s, type, arg0, arg1);
117
+ tcg_out_setcond_eq0(s, type, arg0, arg1, neg);
118
return;
119
case TCG_COND_NE:
120
- if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
121
- tcg_out_ext32u(s, TCG_REG_R0, arg1);
122
- arg1 = TCG_REG_R0;
123
- }
124
- tcg_out_setcond_ne0(s, arg0, arg1);
125
+ tcg_out_setcond_ne0(s, type, arg0, arg1, neg);
126
return;
127
case TCG_COND_GE:
128
tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
129
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
130
case TCG_COND_LT:
131
/* Extract the sign bit. */
132
if (type == TCG_TYPE_I32) {
133
- tcg_out_shri32(s, arg0, arg1, 31);
134
+ if (neg) {
135
+ tcg_out_sari32(s, arg0, arg1, 31);
136
+ } else {
137
+ tcg_out_shri32(s, arg0, arg1, 31);
138
+ }
139
} else {
140
- tcg_out_shri64(s, arg0, arg1, 63);
141
+ if (neg) {
142
+ tcg_out_sari64(s, arg0, arg1, 63);
143
+ } else {
144
+ tcg_out_shri64(s, arg0, arg1, 63);
145
+ }
146
}
147
return;
148
default:
149
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
150
151
isel = tcg_to_isel[cond];
152
153
- tcg_out_movi(s, type, arg0, 1);
154
+ tcg_out_movi(s, type, arg0, neg ? -1 : 1);
155
if (isel & 1) {
156
/* arg0 = (bc ? 0 : 1) */
157
tab = TAB(arg0, 0, arg0);
158
@@ -XXX,XX +XXX,XX @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
159
return;
160
}
161
162
+ inv = false;
163
switch (cond) {
164
case TCG_COND_EQ:
165
arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
166
- tcg_out_setcond_eq0(s, type, arg0, arg1);
167
- return;
168
+ tcg_out_setcond_eq0(s, type, arg0, arg1, neg);
169
+ break;
170
171
case TCG_COND_NE:
172
arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
173
- /* Discard the high bits only once, rather than both inputs. */
174
- if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
175
- tcg_out_ext32u(s, TCG_REG_R0, arg1);
176
- arg1 = TCG_REG_R0;
177
- }
178
- tcg_out_setcond_ne0(s, arg0, arg1);
179
- return;
180
+ tcg_out_setcond_ne0(s, type, arg0, arg1, neg);
181
+ break;
182
183
+ case TCG_COND_LE:
184
+ case TCG_COND_LEU:
185
+ inv = true;
186
+ /* fall through */
187
case TCG_COND_GT:
188
case TCG_COND_GTU:
189
- sh = 30;
190
- crop = 0;
191
- goto crtest;
192
-
119
-
193
- case TCG_COND_LT:
120
- arch_fpop_start(env);
194
- case TCG_COND_LTU:
121
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
195
- sh = 29;
122
- infminusinf = float32_is_infinity(RxV) &&
196
- crop = 0;
123
- is_inf_prod(RsV, RtV) &&
197
+ sh = 30; /* CR7 CR_GT */
124
- (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
198
goto crtest;
125
- infinp = float32_is_infinity(RxV) ||
199
126
- float32_is_infinity(RtV) ||
200
case TCG_COND_GE:
127
- float32_is_infinity(RsV);
201
case TCG_COND_GEU:
128
- RxV = check_nan(RxV, RxV, &env->fp_status);
202
- sh = 31;
129
- RxV = check_nan(RxV, RsV, &env->fp_status);
203
- crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
130
- RxV = check_nan(RxV, RtV, &env->fp_status);
204
+ inv = true;
131
- float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
205
+ /* fall through */
132
- tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
206
+ case TCG_COND_LT:
133
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
207
+ case TCG_COND_LTU:
134
- RxV = tmp;
208
+ sh = 29; /* CR7 CR_LT */
135
- }
209
goto crtest;
136
- set_float_exception_flags(0, &env->fp_status);
210
137
- if (float32_is_infinity(RxV) && !infinp) {
211
- case TCG_COND_LE:
138
- RxV = RxV - 1;
212
- case TCG_COND_LEU:
139
- }
213
- sh = 31;
140
- if (infminusinf) {
214
- crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
141
- RxV = 0;
215
crtest:
142
- }
216
tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
143
- arch_fpop_end(env);
217
- if (crop) {
144
- return RxV;
218
- tcg_out32(s, crop);
145
+ return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
219
- }
146
}
220
tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
147
221
tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
148
float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
222
+ if (neg && inv) {
223
+ tcg_out32(s, ADDI | TAI(arg0, arg0, -1));
224
+ } else if (neg) {
225
+ tcg_out32(s, NEG | RT(arg0) | RA(arg0));
226
+ } else if (inv) {
227
+ tcg_out_xori32(s, arg0, arg0, 1);
228
+ }
229
break;
230
231
default:
232
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
233
234
case INDEX_op_setcond_i32:
235
tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
236
- const_args[2]);
237
+ const_args[2], false);
238
break;
239
case INDEX_op_setcond_i64:
240
tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
241
- const_args[2]);
242
+ const_args[2], false);
243
+ break;
244
+ case INDEX_op_negsetcond_i32:
245
+ tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
246
+ const_args[2], true);
247
+ break;
248
+ case INDEX_op_negsetcond_i64:
249
+ tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
250
+ const_args[2], true);
251
break;
252
case INDEX_op_setcond2_i32:
253
tcg_out_setcond2(s, args, const_args);
254
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
255
case INDEX_op_rotl_i32:
256
case INDEX_op_rotr_i32:
257
case INDEX_op_setcond_i32:
258
+ case INDEX_op_negsetcond_i32:
259
case INDEX_op_and_i64:
260
case INDEX_op_andc_i64:
261
case INDEX_op_shl_i64:
262
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
263
case INDEX_op_rotl_i64:
264
case INDEX_op_rotr_i64:
265
case INDEX_op_setcond_i64:
266
+ case INDEX_op_negsetcond_i64:
267
return C_O1_I2(r, r, ri);
268
269
case INDEX_op_mul_i32:
270
--
149
--
271
2.34.1
150
2.43.0
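The carry trick used by tcg_out_setcond_eq0 above can be checked with ordinary integer arithmetic. A stand-alone C rendering of the two-instruction sequence (hypothetical helper name, 32-bit case only):

    #include <stdint.h>

    /* addic r0, src, -1   : CA = (x != 0), because x + 0xffffffff
     *                       carries out of bit 31 exactly when x >= 1.
     * subfe dst, src, src : dst = ~x + x + CA = 0xffffffff + CA,
     *                       i.e. -1 when x == 0 and 0 otherwise,
     *                       which is -(x == 0).
     */
    uint32_t negsetcond_eq0(uint32_t x)
    {
        uint32_t ca = (x != 0);
        return ~x + x + ca;
    }
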
1
It is more useful to allow low-part deposits into all registers
1
The function is now unused.
2
than to restrict allocation for high-byte deposits.
3
2
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/i386/tcg-target-con-set.h | 2 +-
6
target/hexagon/fma_emu.h | 2 -
8
tcg/i386/tcg-target-con-str.h | 1 -
7
target/hexagon/fma_emu.c | 171 ---------------------------------------
9
tcg/i386/tcg-target.h | 4 ++--
8
2 files changed, 173 deletions(-)
10
tcg/i386/tcg-target.c.inc | 7 +++----
11
4 files changed, 6 insertions(+), 8 deletions(-)
12
9
13
diff --git a/tcg/i386/tcg-target-con-set.h b/tcg/i386/tcg-target-con-set.h
10
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/i386/tcg-target-con-set.h
12
--- a/target/hexagon/fma_emu.h
16
+++ b/tcg/i386/tcg-target-con-set.h
13
+++ b/target/hexagon/fma_emu.h
17
@@ -XXX,XX +XXX,XX @@ C_O1_I1(r, q)
14
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float32_getexp_raw(float32 f32)
18
C_O1_I1(r, r)
15
}
19
C_O1_I1(x, r)
16
int32_t float32_getexp(float32 f32);
20
C_O1_I1(x, x)
17
float32 infinite_float32(uint8_t sign);
21
-C_O1_I2(Q, 0, Q)
18
-float32 internal_fmafx(float32 a, float32 b, float32 c,
22
+C_O1_I2(q, 0, q)
19
- int scale, float_status *fp_status);
23
C_O1_I2(q, r, re)
20
float64 internal_mpyhh(float64 a, float64 b,
24
C_O1_I2(r, 0, ci)
21
unsigned long long int accumulated,
25
C_O1_I2(r, 0, r)
22
float_status *fp_status);
26
diff --git a/tcg/i386/tcg-target-con-str.h b/tcg/i386/tcg-target-con-str.h
23
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
27
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
28
--- a/tcg/i386/tcg-target-con-str.h
25
--- a/target/hexagon/fma_emu.c
29
+++ b/tcg/i386/tcg-target-con-str.h
26
+++ b/target/hexagon/fma_emu.c
30
@@ -XXX,XX +XXX,XX @@ REGS('D', 1u << TCG_REG_EDI)
27
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
31
REGS('r', ALL_GENERAL_REGS)
28
return -1;
32
REGS('x', ALL_VECTOR_REGS)
29
}
33
REGS('q', ALL_BYTEL_REGS) /* regs that can be used as a byte operand */
30
34
-REGS('Q', ALL_BYTEH_REGS) /* regs with a second byte (e.g. %ah) */
31
-static uint64_t float32_getmant(float32 f32)
35
REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_ld/st */
32
-{
36
REGS('s', ALL_BYTEL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_st8_i32 data */
33
- Float a = { .i = f32 };
37
34
- if (float32_is_normal(f32)) {
38
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
35
- return a.mant | 1ULL << 23;
39
index XXXXXXX..XXXXXXX 100644
36
- }
40
--- a/tcg/i386/tcg-target.h
37
- if (float32_is_zero(f32)) {
41
+++ b/tcg/i386/tcg-target.h
38
- return 0;
42
@@ -XXX,XX +XXX,XX @@ typedef enum {
39
- }
43
#define TCG_TARGET_HAS_cmpsel_vec -1
40
- if (float32_is_denormal(f32)) {
44
41
- return a.mant;
45
#define TCG_TARGET_deposit_i32_valid(ofs, len) \
42
- }
46
- (((ofs) == 0 && (len) == 8) || ((ofs) == 8 && (len) == 8) || \
43
- return ~0ULL;
47
- ((ofs) == 0 && (len) == 16))
44
-}
48
+ (((ofs) == 0 && ((len) == 8 || (len) == 16)) || \
45
-
49
+ (TCG_TARGET_REG_BITS == 32 && (ofs) == 8 && (len) == 8))
46
int32_t float32_getexp(float32 f32)
50
#define TCG_TARGET_deposit_i64_valid TCG_TARGET_deposit_i32_valid
47
{
51
48
Float a = { .i = f32 };
52
/* Check for the possibility of high-byte extraction and, for 64-bit,
49
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
53
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
50
}
54
index XXXXXXX..XXXXXXX 100644
51
55
--- a/tcg/i386/tcg-target.c.inc
52
/* Return a maximum finite value with the requested sign */
56
+++ b/tcg/i386/tcg-target.c.inc
53
-static float32 maxfinite_float32(uint8_t sign)
57
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
54
-{
58
# define TCG_REG_L1 TCG_REG_EDX
55
- if (sign) {
59
#endif
56
- return make_float32(SF_MINUS_MAXF);
60
57
- } else {
61
-#define ALL_BYTEH_REGS 0x0000000fu
58
- return make_float32(SF_MAXF);
62
#if TCG_TARGET_REG_BITS == 64
59
- }
63
# define ALL_GENERAL_REGS 0x0000ffffu
60
-}
64
# define ALL_VECTOR_REGS 0xffff0000u
61
-
65
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
62
-/* Return a zero value with requested sign */
66
#else
63
-static float32 zero_float32(uint8_t sign)
67
# define ALL_GENERAL_REGS 0x000000ffu
64
-{
68
# define ALL_VECTOR_REGS 0x00ff0000u
65
- if (sign) {
69
-# define ALL_BYTEL_REGS ALL_BYTEH_REGS
66
- return make_float32(0x80000000);
70
+# define ALL_BYTEL_REGS 0x0000000fu
67
- } else {
71
#endif
68
- return float32_zero;
72
#ifdef CONFIG_SOFTMMU
69
- }
73
# define SOFTMMU_RESERVE_REGS ((1 << TCG_REG_L0) | (1 << TCG_REG_L1))
70
-}
74
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
71
-
75
if (args[3] == 0 && args[4] == 8) {
72
#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
76
/* load bits 0..7 */
73
static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
77
tcg_out_modrm(s, OPC_MOVB_EvGv | P_REXB_R | P_REXB_RM, a2, a0);
74
{ \
78
- } else if (args[3] == 8 && args[4] == 8) {
75
@@ -XXX,XX +XXX,XX @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
79
+ } else if (TCG_TARGET_REG_BITS == 32 && args[3] == 8 && args[4] == 8) {
76
}
80
/* load bits 8..15 */
77
81
tcg_out_modrm(s, OPC_MOVB_EvGv, a2, a0 + 4);
78
GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
82
} else if (args[3] == 0 && args[4] == 16) {
79
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
83
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
80
-
84
81
-static bool is_inf_prod(float64 a, float64 b)
85
case INDEX_op_deposit_i32:
82
-{
86
case INDEX_op_deposit_i64:
83
- return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
87
- return C_O1_I2(Q, 0, Q);
84
- (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
88
+ return C_O1_I2(q, 0, q);
85
- (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
89
86
-}
90
case INDEX_op_setcond_i32:
87
-
91
case INDEX_op_setcond_i64:
88
-static float64 special_fma(float64 a, float64 b, float64 c,
89
- float_status *fp_status)
90
-{
91
- float64 ret = make_float64(0);
92
-
93
- /*
94
- * If A multiplied by B is an exact infinity and C is also an infinity
95
- * but with the opposite sign, FMA returns NaN and raises invalid.
96
- */
97
- uint8_t a_sign = float64_is_neg(a);
98
- uint8_t b_sign = float64_is_neg(b);
99
- uint8_t c_sign = float64_is_neg(c);
100
- if (is_inf_prod(a, b) && float64_is_infinity(c)) {
101
- if ((a_sign ^ b_sign) != c_sign) {
102
- ret = make_float64(DF_NAN);
103
- float_raise(float_flag_invalid, fp_status);
104
- return ret;
105
- }
106
- }
107
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
108
- (float64_is_zero(a) && float64_is_infinity(b))) {
109
- ret = make_float64(DF_NAN);
110
- float_raise(float_flag_invalid, fp_status);
111
- return ret;
112
- }
113
- /*
114
- * If none of the above checks are true and C is a NaN,
115
- * a NaN shall be returned
116
- * If A or B are NaN, a NAN shall be returned.
117
- */
118
- if (float64_is_any_nan(a) ||
119
- float64_is_any_nan(b) ||
120
- float64_is_any_nan(c)) {
121
- if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
122
- float_raise(float_flag_invalid, fp_status);
123
- }
124
- if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
125
- float_raise(float_flag_invalid, fp_status);
126
- }
127
- if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
128
- float_raise(float_flag_invalid, fp_status);
129
- }
130
- ret = make_float64(DF_NAN);
131
- return ret;
132
- }
133
- /*
134
- * We have checked for adding opposite-signed infinities.
135
- * Other infinities return infinity with the correct sign
136
- */
137
- if (float64_is_infinity(c)) {
138
- ret = infinite_float64(c_sign);
139
- return ret;
140
- }
141
- if (float64_is_infinity(a) || float64_is_infinity(b)) {
142
- ret = infinite_float64(a_sign ^ b_sign);
143
- return ret;
144
- }
145
- g_assert_not_reached();
146
-}
147
-
148
-static float32 special_fmaf(float32 a, float32 b, float32 c,
149
- float_status *fp_status)
150
-{
151
- float64 aa, bb, cc;
152
- aa = float32_to_float64(a, fp_status);
153
- bb = float32_to_float64(b, fp_status);
154
- cc = float32_to_float64(c, fp_status);
155
- return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
156
-}
157
-
158
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
159
- float_status *fp_status)
160
-{
161
- Accum prod;
162
- Accum acc;
163
- Accum result;
164
- accum_init(&prod);
165
- accum_init(&acc);
166
- accum_init(&result);
167
-
168
- uint8_t a_sign = float32_is_neg(a);
169
- uint8_t b_sign = float32_is_neg(b);
170
- uint8_t c_sign = float32_is_neg(c);
171
- if (float32_is_infinity(a) ||
172
- float32_is_infinity(b) ||
173
- float32_is_infinity(c)) {
174
- return special_fmaf(a, b, c, fp_status);
175
- }
176
- if (float32_is_any_nan(a) ||
177
- float32_is_any_nan(b) ||
178
- float32_is_any_nan(c)) {
179
- return special_fmaf(a, b, c, fp_status);
180
- }
181
- if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
182
- float32 tmp = float32_mul(a, b, fp_status);
183
- tmp = float32_add(tmp, c, fp_status);
184
- return tmp;
185
- }
186
-
187
- /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
188
- prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
189
-
190
- /*
191
- * Note: extracting the mantissa into an int is multiplying by
192
- * 2**23, so adjust here
193
- */
194
- prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
195
- prod.sign = a_sign ^ b_sign;
196
- if (float32_is_zero(a) || float32_is_zero(b)) {
197
- prod.exp = -2 * WAY_BIG_EXP;
198
- }
199
- if ((scale > 0) && float32_is_denormal(c)) {
200
- acc.mant = int128_mul_6464(0, 0);
201
- acc.exp = -WAY_BIG_EXP;
202
- acc.sign = c_sign;
203
- acc.sticky = 1;
204
- result = accum_add(prod, acc);
205
- } else if (!float32_is_zero(c)) {
206
- acc.mant = int128_mul_6464(float32_getmant(c), 1);
207
- acc.exp = float32_getexp(c);
208
- acc.sign = c_sign;
209
- result = accum_add(prod, acc);
210
- } else {
211
- result = prod;
212
- }
213
- result.exp += scale;
214
- return accum_round_float32(result, fp_status);
215
-}
216
217
float64 internal_mpyhh(float64 a, float64 b,
218
unsigned long long int accumulated,
92
--
219
--
93
2.34.1
220
2.43.0
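The new TCG_TARGET_deposit_i32_valid test reads: byte or halfword deposits at offset 0 are always supported, while the %ah/%bh/%ch/%dh form (offset 8, length 8) is only claimed on 32-bit hosts, since the high-byte registers cannot be encoded together with a REX prefix and keeping them on 64-bit would require the tighter register constraint this patch drops. In plain C (hypothetical wrapper, same logic as the macro):

    static inline bool deposit_valid(int ofs, int len, bool host_is_64bit)
    {
        return (ofs == 0 && (len == 8 || len == 16))
            || (!host_is_64bit && ofs == 8 && len == 8);
    }
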
1
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
1
This massive macro is now only used once.
2
Expand it for use only by float64.
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
target/arm/tcg/translate-a64.c | 22 +++++++++-------------
7
target/hexagon/fma_emu.c | 255 +++++++++++++++++++--------------------
5
target/arm/tcg/translate.c | 12 ++++--------
8
1 file changed, 127 insertions(+), 128 deletions(-)
6
2 files changed, 13 insertions(+), 21 deletions(-)
7
9
8
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
9
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
10
--- a/target/arm/tcg/translate-a64.c
12
--- a/target/hexagon/fma_emu.c
11
+++ b/target/arm/tcg/translate-a64.c
13
+++ b/target/hexagon/fma_emu.c
12
@@ -XXX,XX +XXX,XX @@ static void disas_cond_select(DisasContext *s, uint32_t insn)
14
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
13
14
if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
15
/* CSET & CSETM. */
16
- tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
17
if (else_inv) {
18
- tcg_gen_neg_i64(tcg_rd, tcg_rd);
19
+ tcg_gen_negsetcond_i64(tcg_invert_cond(c.cond),
20
+ tcg_rd, c.value, zero);
21
+ } else {
22
+ tcg_gen_setcond_i64(tcg_invert_cond(c.cond),
23
+ tcg_rd, c.value, zero);
24
}
25
} else {
26
TCGv_i64 t_true = cpu_reg(s, rn);
27
@@ -XXX,XX +XXX,XX @@ static void handle_3same_64(DisasContext *s, int opcode, bool u,
28
}
29
break;
30
case 0x6: /* CMGT, CMHI */
31
- /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
32
- * We implement this using setcond (test) and then negating.
33
- */
34
cond = u ? TCG_COND_GTU : TCG_COND_GT;
35
do_cmop:
36
- tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
37
- tcg_gen_neg_i64(tcg_rd, tcg_rd);
38
+ /* 64 bit integer comparison, result = test ? -1 : 0. */
39
+ tcg_gen_negsetcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
40
break;
41
case 0x7: /* CMGE, CMHS */
42
cond = u ? TCG_COND_GEU : TCG_COND_GE;
43
@@ -XXX,XX +XXX,XX @@ static void handle_2misc_64(DisasContext *s, int opcode, bool u,
44
}
45
break;
46
case 0xa: /* CMLT */
47
- /* 64 bit integer comparison against zero, result is
48
- * test ? (2^64 - 1) : 0. We implement via setcond(!test) and
49
- * subtracting 1.
50
- */
51
cond = TCG_COND_LT;
52
do_cmop:
53
- tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
54
- tcg_gen_neg_i64(tcg_rd, tcg_rd);
55
+ /* 64 bit integer comparison against zero, result is test ? -1 : 0. */
56
+ tcg_gen_negsetcond_i64(cond, tcg_rd, tcg_rn, tcg_constant_i64(0));
57
break;
58
case 0x8: /* CMGT, CMGE */
59
cond = u ? TCG_COND_GE : TCG_COND_GT;
60
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/target/arm/tcg/translate.c
63
+++ b/target/arm/tcg/translate.c
64
@@ -XXX,XX +XXX,XX @@ void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
65
#define GEN_CMP0(NAME, COND) \
66
static void gen_##NAME##0_i32(TCGv_i32 d, TCGv_i32 a) \
67
{ \
68
- tcg_gen_setcondi_i32(COND, d, a, 0); \
69
- tcg_gen_neg_i32(d, d); \
70
+ tcg_gen_negsetcond_i32(COND, d, a, tcg_constant_i32(0)); \
71
} \
72
static void gen_##NAME##0_i64(TCGv_i64 d, TCGv_i64 a) \
73
{ \
74
- tcg_gen_setcondi_i64(COND, d, a, 0); \
75
- tcg_gen_neg_i64(d, d); \
76
+ tcg_gen_negsetcond_i64(COND, d, a, tcg_constant_i64(0)); \
77
} \
78
static void gen_##NAME##0_vec(unsigned vece, TCGv_vec d, TCGv_vec a) \
79
{ \
80
@@ -XXX,XX +XXX,XX @@ void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
81
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
82
{
83
tcg_gen_and_i32(d, a, b);
84
- tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
85
- tcg_gen_neg_i32(d, d);
86
+ tcg_gen_negsetcond_i32(TCG_COND_NE, d, d, tcg_constant_i32(0));
87
}
15
}
88
16
89
void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
17
/* Return a maximum finite value with the requested sign */
90
{
18
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
91
tcg_gen_and_i64(d, a, b);
19
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
92
- tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
20
-{ \
93
- tcg_gen_neg_i64(d, d);
21
- if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
94
+ tcg_gen_negsetcond_i64(TCG_COND_NE, d, d, tcg_constant_i64(0));
22
- && ((a.guard | a.round | a.sticky) == 0)) { \
23
- /* result zero */ \
24
- switch (fp_status->float_rounding_mode) { \
25
- case float_round_down: \
26
- return zero_##SUFFIX(1); \
27
- default: \
28
- return zero_##SUFFIX(0); \
29
- } \
30
- } \
31
- /* Normalize right */ \
32
- /* We want MANTBITS bits of mantissa plus the leading one. */ \
33
- /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
34
- /* So we need to normalize right while the high word is non-zero and \
35
- * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
36
- while ((int128_gethi(a.mant) != 0) || \
37
- ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
38
- a = accum_norm_right(a, 1); \
39
- } \
40
- /* \
41
- * OK, now normalize left \
42
- * We want to normalize left until we have a leading one in bit 24 \
43
- * Theoretically, we only need to shift a maximum of one to the left if we \
44
- * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
45
- * should be 0 \
46
- */ \
47
- while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
48
- a = accum_norm_left(a); \
49
- } \
50
- /* \
51
- * OK, now we might need to denormalize because of potential underflow. \
52
- * We need to do this before rounding, and rounding might make us normal \
53
- * again \
54
- */ \
55
- while (a.exp <= 0) { \
56
- a = accum_norm_right(a, 1 - a.exp); \
57
- /* \
58
- * Do we have underflow? \
59
- * That's when we get an inexact answer because we ran out of bits \
60
- * in a denormal. \
61
- */ \
62
- if (a.guard || a.round || a.sticky) { \
63
- float_raise(float_flag_underflow, fp_status); \
64
- } \
65
- } \
66
- /* OK, we're relatively canonical... now we need to round */ \
67
- if (a.guard || a.round || a.sticky) { \
68
- float_raise(float_flag_inexact, fp_status); \
69
- switch (fp_status->float_rounding_mode) { \
70
- case float_round_to_zero: \
71
- /* Chop and we're done */ \
72
- break; \
73
- case float_round_up: \
74
- if (a.sign == 0) { \
75
- a.mant = int128_add(a.mant, int128_one()); \
76
- } \
77
- break; \
78
- case float_round_down: \
79
- if (a.sign != 0) { \
80
- a.mant = int128_add(a.mant, int128_one()); \
81
- } \
82
- break; \
83
- default: \
84
- if (a.round || a.sticky) { \
85
- /* round up if guard is 1, down if guard is zero */ \
86
- a.mant = int128_add(a.mant, int128_make64(a.guard)); \
87
- } else if (a.guard) { \
88
- /* exactly .5, round up if odd */ \
89
- a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
90
- } \
91
- break; \
92
- } \
93
- } \
94
- /* \
95
- * OK, now we might have carried all the way up. \
96
- * So we might need to shr once \
97
- * at least we know that the lsb should be zero if we rounded and \
98
- * got a carry out... \
99
- */ \
100
- if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
101
- a = accum_norm_right(a, 1); \
102
- } \
103
- /* Overflow? */ \
104
- if (a.exp >= INF_EXP) { \
105
- /* Yep, inf result */ \
106
- float_raise(float_flag_overflow, fp_status); \
107
- float_raise(float_flag_inexact, fp_status); \
108
- switch (fp_status->float_rounding_mode) { \
109
- case float_round_to_zero: \
110
- return maxfinite_##SUFFIX(a.sign); \
111
- case float_round_up: \
112
- if (a.sign == 0) { \
113
- return infinite_##SUFFIX(a.sign); \
114
- } else { \
115
- return maxfinite_##SUFFIX(a.sign); \
116
- } \
117
- case float_round_down: \
118
- if (a.sign != 0) { \
119
- return infinite_##SUFFIX(a.sign); \
120
- } else { \
121
- return maxfinite_##SUFFIX(a.sign); \
122
- } \
123
- default: \
124
- return infinite_##SUFFIX(a.sign); \
125
- } \
126
- } \
127
- /* Underflow? */ \
128
- if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
129
- /* Leading one means: No, we're normal. So, we should be done... */ \
130
- INTERNAL_TYPE ret; \
131
- ret.i = 0; \
132
- ret.sign = a.sign; \
133
- ret.exp = a.exp; \
134
- ret.mant = int128_getlo(a.mant); \
135
- return ret.i; \
136
- } \
137
- assert(a.exp == 1); \
138
- INTERNAL_TYPE ret; \
139
- ret.i = 0; \
140
- ret.sign = a.sign; \
141
- ret.exp = 0; \
142
- ret.mant = int128_getlo(a.mant); \
143
- return ret.i; \
144
+static float64 accum_round_float64(Accum a, float_status *fp_status)
145
+{
146
+ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
147
+ && ((a.guard | a.round | a.sticky) == 0)) {
148
+ /* result zero */
149
+ switch (fp_status->float_rounding_mode) {
150
+ case float_round_down:
151
+ return zero_float64(1);
152
+ default:
153
+ return zero_float64(0);
154
+ }
155
+ }
156
+ /*
157
+ * Normalize right
158
+ * We want DF_MANTBITS bits of mantissa plus the leading one.
159
+ * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
160
+ * So we need to normalize right while the high word is non-zero and
161
+ * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
162
+ */
163
+ while ((int128_gethi(a.mant) != 0) ||
164
+ ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
165
+ a = accum_norm_right(a, 1);
166
+ }
167
+ /*
168
+ * OK, now normalize left
169
+ * We want to normalize left until we have a leading one in bit 24
170
+ * Theoretically, we only need to shift a maximum of one to the left if we
171
+ * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
172
+ * should be 0
173
+ */
174
+ while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
175
+ a = accum_norm_left(a);
176
+ }
177
+ /*
178
+ * OK, now we might need to denormalize because of potential underflow.
179
+ * We need to do this before rounding, and rounding might make us normal
180
+ * again
181
+ */
182
+ while (a.exp <= 0) {
183
+ a = accum_norm_right(a, 1 - a.exp);
184
+ /*
185
+ * Do we have underflow?
186
+ * That's when we get an inexact answer because we ran out of bits
187
+ * in a denormal.
188
+ */
189
+ if (a.guard || a.round || a.sticky) {
190
+ float_raise(float_flag_underflow, fp_status);
191
+ }
192
+ }
193
+ /* OK, we're relatively canonical... now we need to round */
194
+ if (a.guard || a.round || a.sticky) {
195
+ float_raise(float_flag_inexact, fp_status);
196
+ switch (fp_status->float_rounding_mode) {
197
+ case float_round_to_zero:
198
+ /* Chop and we're done */
199
+ break;
200
+ case float_round_up:
201
+ if (a.sign == 0) {
202
+ a.mant = int128_add(a.mant, int128_one());
203
+ }
204
+ break;
205
+ case float_round_down:
206
+ if (a.sign != 0) {
207
+ a.mant = int128_add(a.mant, int128_one());
208
+ }
209
+ break;
210
+ default:
211
+ if (a.round || a.sticky) {
212
+ /* round up if guard is 1, down if guard is zero */
213
+ a.mant = int128_add(a.mant, int128_make64(a.guard));
214
+ } else if (a.guard) {
215
+ /* exactly .5, round up if odd */
216
+ a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
217
+ }
218
+ break;
219
+ }
220
+ }
221
+ /*
222
+ * OK, now we might have carried all the way up.
223
+ * So we might need to shr once
224
+ * at least we know that the lsb should be zero if we rounded and
225
+ * got a carry out...
226
+ */
227
+ if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
228
+ a = accum_norm_right(a, 1);
229
+ }
230
+ /* Overflow? */
231
+ if (a.exp >= DF_INF_EXP) {
232
+ /* Yep, inf result */
233
+ float_raise(float_flag_overflow, fp_status);
234
+ float_raise(float_flag_inexact, fp_status);
235
+ switch (fp_status->float_rounding_mode) {
236
+ case float_round_to_zero:
237
+ return maxfinite_float64(a.sign);
238
+ case float_round_up:
239
+ if (a.sign == 0) {
240
+ return infinite_float64(a.sign);
241
+ } else {
242
+ return maxfinite_float64(a.sign);
243
+ }
244
+ case float_round_down:
245
+ if (a.sign != 0) {
246
+ return infinite_float64(a.sign);
247
+ } else {
248
+ return maxfinite_float64(a.sign);
249
+ }
250
+ default:
251
+ return infinite_float64(a.sign);
252
+ }
253
+ }
254
+ /* Underflow? */
255
+ if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
256
+ /* Leading one means: No, we're normal. So, we should be done... */
257
+ Double ret;
258
+ ret.i = 0;
259
+ ret.sign = a.sign;
260
+ ret.exp = a.exp;
261
+ ret.mant = int128_getlo(a.mant);
262
+ return ret.i;
263
+ }
264
+ assert(a.exp == 1);
265
+ Double ret;
266
+ ret.i = 0;
267
+ ret.sign = a.sign;
268
+ ret.exp = 0;
269
+ ret.mant = int128_getlo(a.mant);
270
+ return ret.i;
95
}
271
}
96
272
97
static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
273
-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
274
-
275
float64 internal_mpyhh(float64 a, float64 b,
276
unsigned long long int accumulated,
277
float_status *fp_status)
98
--
278
--
99
2.34.1
279
2.43.0
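The setcond-plus-neg rewrites above all rely on one identity: negsetcond produces the arithmetic negation of the 0/1 setcond result, i.e. an all-ones mask when the comparison holds and zero otherwise. A minimal C sketch of that value (the helper name is illustrative only, using TCG_COND_NE as the example condition):

#include <stdint.h>

/* Illustrative only: the value tcg_gen_negsetcond_i64(TCG_COND_NE, d, a, b)
 * leaves in d, i.e. -(a != b). */
static inline uint64_t negsetcond_ne_sketch(uint64_t a, uint64_t b)
{
    uint64_t setcond = (a != b) ? 1 : 0;
    return -setcond;    /* 0xffffffffffffffff when a != b, 0 otherwise */
}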
1
From: Anton Johansson via <qemu-devel@nongnu.org>
1
This structure, with bitfields, is incorrect for big-endian.
2
Use the existing float32_getexp_raw which uses extract32.
2
3
3
tlb_addr is changed from target_ulong to uint64_t to match the type of
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
a CPUTLBEntry value, and the address is changed to vaddr.
5
6
Signed-off-by: Anton Johansson <anjo@rev.ng>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-Id: <20230807155706.9580-8-anjo@rev.ng>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
6
---
11
include/exec/cpu-all.h | 4 ++--
7
target/hexagon/fma_emu.c | 16 +++-------------
12
1 file changed, 2 insertions(+), 2 deletions(-)
8
1 file changed, 3 insertions(+), 13 deletions(-)
13
9
14
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
15
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/cpu-all.h
12
--- a/target/hexagon/fma_emu.c
17
+++ b/include/exec/cpu-all.h
13
+++ b/target/hexagon/fma_emu.c
18
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
14
@@ -XXX,XX +XXX,XX @@ typedef union {
19
* @addr: virtual address to test (must be page aligned)
15
};
20
* @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
16
} Double;
21
*/
17
22
-static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
18
-typedef union {
23
+static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
19
- float f;
20
- uint32_t i;
21
- struct {
22
- uint32_t mant:23;
23
- uint32_t exp:8;
24
- uint32_t sign:1;
25
- };
26
-} Float;
27
-
28
static uint64_t float64_getmant(float64 f64)
24
{
29
{
25
return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
30
Double a = { .i = f64 };
26
}
31
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
27
@@ -XXX,XX +XXX,XX @@ static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong addr)
32
28
* @addr: virtual address to test (need not be page aligned)
33
int32_t float32_getexp(float32 f32)
29
* @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
30
*/
31
-static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
32
+static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
33
{
34
{
34
return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
35
- Float a = { .i = f32 };
36
+ int exp = float32_getexp_raw(f32);
37
if (float32_is_normal(f32)) {
38
- return a.exp;
39
+ return exp;
40
}
41
if (float32_is_denormal(f32)) {
42
- return a.exp + 1;
43
+ return exp + 1;
44
}
45
return -1;
35
}
46
}
36
--
47
--
37
2.34.1
48
2.43.0
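On the bitfield change above: the layout of bitfields within a storage unit is implementation-defined, so the Float union only overlays the IEEE single-precision encoding correctly on little-endian hosts, whereas a shift-and-mask extraction names the bit positions explicitly and is host-independent. A minimal sketch of the idea (the _sketch helpers below are illustrative, not QEMU API; the real extract32 lives in include/qemu/bitops.h):

#include <stdint.h>

/* Host-endian-dependent: the field order within the word is not guaranteed. */
typedef union {
    uint32_t i;
    struct {
        uint32_t mant:23;
        uint32_t exp:8;
        uint32_t sign:1;
    };
} FloatSketch;

/* Mirrors the behaviour of QEMU's extract32(value, start, length). */
static inline uint32_t extract32_sketch(uint32_t value, int start, int length)
{
    return (value >> start) & (~0U >> (32 - length));
}

/* IEEE binary32: bit 31 sign, bits 30..23 exponent, bits 22..0 mantissa. */
static inline int float32_getexp_raw_sketch(uint32_t f32_bits)
{
    return extract32_sketch(f32_bits, 23, 8);
}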
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
This structure, with bitfields, is incorrect for big-endian.
2
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
2
Use extract64 and deposit64 instead.
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
6
---
5
target/openrisc/translate.c | 6 ++----
7
target/hexagon/fma_emu.c | 46 ++++++++++++++--------------------------
6
1 file changed, 2 insertions(+), 4 deletions(-)
8
1 file changed, 16 insertions(+), 30 deletions(-)
7
9
8
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
9
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
10
--- a/target/openrisc/translate.c
12
--- a/target/hexagon/fma_emu.c
11
+++ b/target/openrisc/translate.c
13
+++ b/target/hexagon/fma_emu.c
12
@@ -XXX,XX +XXX,XX @@ static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
14
@@ -XXX,XX +XXX,XX @@
13
15
14
tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
16
#define WAY_BIG_EXP 4096
15
tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
17
16
- tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
18
-typedef union {
17
+ tcg_gen_negsetcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
19
- double f;
18
20
- uint64_t i;
19
- tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
21
- struct {
20
gen_ove_ov(dc);
22
- uint64_t mant:52;
23
- uint64_t exp:11;
24
- uint64_t sign:1;
25
- };
26
-} Double;
27
-
28
static uint64_t float64_getmant(float64 f64)
29
{
30
- Double a = { .i = f64 };
31
+ uint64_t mant = extract64(f64, 0, 52);
32
if (float64_is_normal(f64)) {
33
- return a.mant | 1ULL << 52;
34
+ return mant | 1ULL << 52;
35
}
36
if (float64_is_zero(f64)) {
37
return 0;
38
}
39
if (float64_is_denormal(f64)) {
40
- return a.mant;
41
+ return mant;
42
}
43
return ~0ULL;
21
}
44
}
22
45
23
@@ -XXX,XX +XXX,XX @@ static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
46
int32_t float64_getexp(float64 f64)
24
47
{
25
tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
48
- Double a = { .i = f64 };
26
tcg_gen_sari_i64(t1, cpu_mac, 63);
49
+ int exp = extract64(f64, 52, 11);
27
- tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high);
50
if (float64_is_normal(f64)) {
28
+ tcg_gen_negsetcond_i64(TCG_COND_NE, t1, t1, high);
51
- return a.exp;
29
tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
52
+ return exp;
30
- tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
31
32
gen_ove_ov(dc);
33
}
53
}
54
if (float64_is_denormal(f64)) {
55
- return a.exp + 1;
56
+ return exp + 1;
57
}
58
return -1;
59
}
60
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
61
/* Return a maximum finite value with the requested sign */
62
static float64 accum_round_float64(Accum a, float_status *fp_status)
63
{
64
+ uint64_t ret;
65
+
66
if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
67
&& ((a.guard | a.round | a.sticky) == 0)) {
68
/* result zero */
69
@@ -XXX,XX +XXX,XX @@ static float64 accum_round_float64(Accum a, float_status *fp_status)
70
}
71
}
72
/* Underflow? */
73
- if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
74
+ ret = int128_getlo(a.mant);
75
+ if (ret & (1ULL << DF_MANTBITS)) {
76
/* Leading one means: No, we're normal. So, we should be done... */
77
- Double ret;
78
- ret.i = 0;
79
- ret.sign = a.sign;
80
- ret.exp = a.exp;
81
- ret.mant = int128_getlo(a.mant);
82
- return ret.i;
83
+ ret = deposit64(ret, 52, 11, a.exp);
84
+ } else {
85
+ assert(a.exp == 1);
86
+ ret = deposit64(ret, 52, 11, 0);
87
}
88
- assert(a.exp == 1);
89
- Double ret;
90
- ret.i = 0;
91
- ret.sign = a.sign;
92
- ret.exp = 0;
93
- ret.mant = int128_getlo(a.mant);
94
- return ret.i;
95
+ ret = deposit64(ret, 63, 1, a.sign);
96
+ return ret;
97
}
98
99
float64 internal_mpyhh(float64 a, float64 b,
34
--
100
--
35
2.34.1
101
2.43.0
36
37
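The float64 counterpart above drops the Double bitfield writes in favour of explicit bit positions. A sketch of assembling the result image that way, under the same caveats (the _sketch helpers are illustrative; QEMU's real deposit64(value, start, length, fieldval) lives in include/qemu/bitops.h):

#include <stdint.h>

/* Mirrors QEMU's deposit64(value, start, length, fieldval). */
static inline uint64_t deposit64_sketch(uint64_t value, int start, int length,
                                        uint64_t fieldval)
{
    uint64_t mask = (~0ULL >> (64 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}

/* IEEE binary64: bit 63 sign, bits 62..52 exponent, bits 51..0 mantissa. */
static inline uint64_t pack_float64_sketch(uint64_t sign, uint64_t exp,
                                           uint64_t mant)
{
    uint64_t ret = mant & ((1ULL << 52) - 1);
    ret = deposit64_sketch(ret, 52, 11, exp);
    ret = deposit64_sketch(ret, 63, 1, sign);
    return ret;
}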
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
No need to open-code 64x64->128-bit multiplication.
2
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
---
5
target/m68k/translate.c | 24 ++++++++++--------------
6
target/hexagon/fma_emu.c | 32 +++-----------------------------
6
1 file changed, 10 insertions(+), 14 deletions(-)
7
1 file changed, 3 insertions(+), 29 deletions(-)
7
8
8
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
9
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/target/m68k/translate.c
11
--- a/target/hexagon/fma_emu.c
11
+++ b/target/m68k/translate.c
12
+++ b/target/hexagon/fma_emu.c
12
@@ -XXX,XX +XXX,XX @@ static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
13
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32)
13
case 14: /* GT (!(Z || (N ^ V))) */
14
return -1;
14
case 15: /* LE (Z || (N ^ V)) */
15
c->v1 = tmp = tcg_temp_new();
16
- tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
17
- tcg_gen_neg_i32(tmp, tmp);
18
+ tcg_gen_negsetcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
19
tmp2 = tcg_temp_new();
20
tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
21
tcg_gen_or_i32(tmp, tmp, tmp2);
22
@@ -XXX,XX +XXX,XX @@ DISAS_INSN(scc)
23
gen_cc_cond(&c, s, cond);
24
25
tmp = tcg_temp_new();
26
- tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
27
+ tcg_gen_negsetcond_i32(c.tcond, tmp, c.v1, c.v2);
28
29
- tcg_gen_neg_i32(tmp, tmp);
30
DEST_EA(env, insn, OS_BYTE, tmp, NULL);
31
}
15
}
32
16
33
@@ -XXX,XX +XXX,XX @@ DISAS_INSN(mull)
17
-static uint32_t int128_getw0(Int128 x)
34
tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
18
-{
35
/* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
19
- return int128_getlo(x);
36
tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
20
-}
37
- tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_Z);
21
-
38
+ tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V,
22
-static uint32_t int128_getw1(Int128 x)
39
+ QREG_CC_V, QREG_CC_Z);
23
-{
40
} else {
24
- return int128_getlo(x) >> 32;
41
tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
25
-}
42
/* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
26
-
43
- tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, QREG_CC_C);
27
static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
44
+ tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V,
28
{
45
+ QREG_CC_V, QREG_CC_C);
29
- Int128 a, b;
46
}
30
- uint64_t pp0, pp1a, pp1b, pp1s, pp2;
47
- tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
31
+ uint64_t l, h;
48
tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);
32
49
33
- a = int128_make64(ai);
50
tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
34
- b = int128_make64(bi);
51
@@ -XXX,XX +XXX,XX @@ static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
35
- pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
52
if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
36
- pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
53
/* if shift count >= bits, V is (reg != 0) */
37
- pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
54
if (count >= bits) {
38
- pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
55
- tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
39
-
56
+ tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
40
- pp1s = pp1a + pp1b;
57
} else {
41
- if ((pp1s < pp1a) || (pp1s < pp1b)) {
58
TCGv t0 = tcg_temp_new();
42
- pp2 += (1ULL << 32);
59
tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
43
- }
60
tcg_gen_sari_i32(t0, reg, bits - count - 1);
44
- uint64_t ret_low = pp0 + (pp1s << 32);
61
- tcg_gen_setcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
45
- if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
62
+ tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
46
- pp2 += 1;
63
}
47
- }
64
- tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
48
-
65
}
49
- return int128_make128(ret_low, pp2 + (pp1s >> 32));
66
} else {
50
+ mulu64(&l, &h, ai, bi);
67
tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
51
+ return int128_make128(l, h);
68
@@ -XXX,XX +XXX,XX @@ static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
69
/* Ignore the bits below the sign bit. */
70
tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
71
/* If any bits remain set, we have overflow. */
72
- tcg_gen_setcondi_i64(TCG_COND_NE, t64, t64, 0);
73
+ tcg_gen_negsetcond_i64(TCG_COND_NE, t64, t64, tcg_constant_i64(0));
74
tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
75
- tcg_gen_neg_i32(QREG_CC_V, QREG_CC_V);
76
}
77
} else {
78
tcg_gen_shli_i64(t64, t64, 32);
79
@@ -XXX,XX +XXX,XX @@ DISAS_INSN(fscc)
80
gen_fcc_cond(&c, s, cond);
81
82
tmp = tcg_temp_new();
83
- tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
84
+ tcg_gen_negsetcond_i32(c.tcond, tmp, c.v1, c.v2);
85
86
- tcg_gen_neg_i32(tmp, tmp);
87
DEST_EA(env, insn, OS_BYTE, tmp, NULL);
88
}
52
}
89
53
54
static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
90
--
55
--
91
2.34.1
56
2.43.0
92
93
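The removed int128_mul_6464 body above is the classic 32x32 partial-product scheme; a single widening multiply (the replacement uses QEMU's mulu64 helper) yields the same 128-bit product. A standalone check of that equivalence, assuming a compiler that provides unsigned __int128:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t a = 0x123456789abcdef0ULL, b = 0xfedcba9876543210ULL;

    /* Schoolbook 32x32 partial products, as in the removed code. */
    uint64_t pp0  = (a & 0xffffffffu) * (b & 0xffffffffu);
    uint64_t pp1a = (a >> 32) * (b & 0xffffffffu);
    uint64_t pp1b = (b >> 32) * (a & 0xffffffffu);
    uint64_t pp2  = (a >> 32) * (b >> 32);
    uint64_t pp1s = pp1a + pp1b;
    if (pp1s < pp1a) {
        pp2 += 1ULL << 32;          /* carry out of the middle sum */
    }
    uint64_t lo_ref = pp0 + (pp1s << 32);
    if (lo_ref < pp0) {
        pp2 += 1;                   /* carry into the high word */
    }
    uint64_t hi_ref = pp2 + (pp1s >> 32);

    /* One widening multiply computes the same result. */
    unsigned __int128 p = (unsigned __int128)a * b;
    assert((uint64_t)p == lo_ref);
    assert((uint64_t)(p >> 64) == hi_ref);
    return 0;
}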
1
From: Anton Johansson via <qemu-devel@nongnu.org>
1
Initialize x with accumulated via direct assignment,
2
rather than multiplying by 1.
2
3
3
As we are now using vaddr for representing guest addresses, update the
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
static assert to check that vaddr fits in the run_on_cpu_data union.
5
6
Signed-off-by: Anton Johansson <anjo@rev.ng>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-Id: <20230807155706.9580-10-anjo@rev.ng>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
6
---
11
accel/tcg/cputlb.c | 5 +++--
7
target/hexagon/fma_emu.c | 2 +-
12
1 file changed, 3 insertions(+), 2 deletions(-)
8
1 file changed, 1 insertion(+), 1 deletion(-)
13
9
14
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
15
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
16
--- a/accel/tcg/cputlb.c
12
--- a/target/hexagon/fma_emu.c
17
+++ b/accel/tcg/cputlb.c
13
+++ b/target/hexagon/fma_emu.c
18
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ float64 internal_mpyhh(float64 a, float64 b,
19
} while (0)
15
float64_is_infinity(b)) {
20
16
return float64_mul(a, b, fp_status);
21
/* run_on_cpu_data.target_ptr should always be big enough for a
17
}
22
- * target_ulong even on 32 bit builds */
18
- x.mant = int128_mul_6464(accumulated, 1);
23
-QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
19
+ x.mant = int128_make64(accumulated);
24
+ * vaddr even on 32 bit builds
20
x.sticky = sticky;
25
+ */
21
prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
26
+QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));
22
x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
27
28
/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
29
*/
30
--
23
--
31
2.34.1
24
2.43.0
1
Convert all targets simultaneously, as the gen_intermediate_code
2
function disappears from the target. While there are possible
3
workarounds, they're larger than simply performing the conversion.
4
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
7
---
4
target/m68k/translate.c | 11 ++---------
8
include/exec/translator.h | 14 --------------
5
1 file changed, 2 insertions(+), 9 deletions(-)
9
include/hw/core/tcg-cpu-ops.h | 13 +++++++++++++
10
target/alpha/cpu.h | 2 ++
11
target/arm/internals.h | 2 ++
12
target/avr/cpu.h | 2 ++
13
target/hexagon/cpu.h | 2 ++
14
target/hppa/cpu.h | 2 ++
15
target/i386/tcg/helper-tcg.h | 2 ++
16
target/loongarch/internals.h | 2 ++
17
target/m68k/cpu.h | 2 ++
18
target/microblaze/cpu.h | 2 ++
19
target/mips/tcg/tcg-internal.h | 2 ++
20
target/openrisc/cpu.h | 2 ++
21
target/ppc/cpu.h | 2 ++
22
target/riscv/cpu.h | 3 +++
23
target/rx/cpu.h | 2 ++
24
target/s390x/s390x-internal.h | 2 ++
25
target/sh4/cpu.h | 2 ++
26
target/sparc/cpu.h | 2 ++
27
target/tricore/cpu.h | 2 ++
28
target/xtensa/cpu.h | 2 ++
29
accel/tcg/cpu-exec.c | 8 +++++---
30
accel/tcg/translate-all.c | 8 +++++---
31
target/alpha/cpu.c | 1 +
32
target/alpha/translate.c | 4 ++--
33
target/arm/cpu.c | 1 +
34
target/arm/tcg/cpu-v7m.c | 1 +
35
target/arm/tcg/translate.c | 5 ++---
36
target/avr/cpu.c | 1 +
37
target/avr/translate.c | 6 +++---
38
target/hexagon/cpu.c | 1 +
39
target/hexagon/translate.c | 4 ++--
40
target/hppa/cpu.c | 1 +
41
target/hppa/translate.c | 4 ++--
42
target/i386/tcg/tcg-cpu.c | 1 +
43
target/i386/tcg/translate.c | 5 ++---
44
target/loongarch/cpu.c | 1 +
45
target/loongarch/tcg/translate.c | 4 ++--
46
target/m68k/cpu.c | 1 +
47
target/m68k/translate.c | 4 ++--
48
target/microblaze/cpu.c | 1 +
49
target/microblaze/translate.c | 4 ++--
50
target/mips/cpu.c | 1 +
51
target/mips/tcg/translate.c | 4 ++--
52
target/openrisc/cpu.c | 1 +
53
target/openrisc/translate.c | 4 ++--
54
target/ppc/cpu_init.c | 1 +
55
target/ppc/translate.c | 4 ++--
56
target/riscv/tcg/tcg-cpu.c | 1 +
57
target/riscv/translate.c | 4 ++--
58
target/rx/cpu.c | 1 +
59
target/rx/translate.c | 4 ++--
60
target/s390x/cpu.c | 1 +
61
target/s390x/tcg/translate.c | 4 ++--
62
target/sh4/cpu.c | 1 +
63
target/sh4/translate.c | 4 ++--
64
target/sparc/cpu.c | 1 +
65
target/sparc/translate.c | 4 ++--
66
target/tricore/cpu.c | 1 +
67
target/tricore/translate.c | 5 ++---
68
target/xtensa/cpu.c | 1 +
69
target/xtensa/translate.c | 4 ++--
70
62 files changed, 121 insertions(+), 62 deletions(-)
6
71
72
diff --git a/include/exec/translator.h b/include/exec/translator.h
73
index XXXXXXX..XXXXXXX 100644
74
--- a/include/exec/translator.h
75
+++ b/include/exec/translator.h
76
@@ -XXX,XX +XXX,XX @@
77
#include "qemu/bswap.h"
78
#include "exec/vaddr.h"
79
80
-/**
81
- * gen_intermediate_code
82
- * @cpu: cpu context
83
- * @tb: translation block
84
- * @max_insns: max number of instructions to translate
85
- * @pc: guest virtual program counter address
86
- * @host_pc: host physical program counter address
87
- *
88
- * This function must be provided by the target, which should create
89
- * the target-specific DisasContext, and then invoke translator_loop.
90
- */
91
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
92
- vaddr pc, void *host_pc);
93
-
94
/**
95
* DisasJumpType:
96
* @DISAS_NEXT: Next instruction in program order.
97
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
98
index XXXXXXX..XXXXXXX 100644
99
--- a/include/hw/core/tcg-cpu-ops.h
100
+++ b/include/hw/core/tcg-cpu-ops.h
101
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
102
* Called when the first CPU is realized.
103
*/
104
void (*initialize)(void);
105
+ /**
106
+ * @translate_code: Translate guest instructions to TCGOps
107
+ * @cpu: cpu context
108
+ * @tb: translation block
109
+ * @max_insns: max number of instructions to translate
110
+ * @pc: guest virtual program counter address
111
+ * @host_pc: host physical program counter address
112
+ *
113
+ * This function must be provided by the target, which should create
114
+ * the target-specific DisasContext, and then invoke translator_loop.
115
+ */
116
+ void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
117
+ int *max_insns, vaddr pc, void *host_pc);
118
/**
119
* @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
120
*
121
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
122
index XXXXXXX..XXXXXXX 100644
123
--- a/target/alpha/cpu.h
124
+++ b/target/alpha/cpu.h
125
@@ -XXX,XX +XXX,XX @@ enum {
126
};
127
128
void alpha_translate_init(void);
129
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
130
+ int *max_insns, vaddr pc, void *host_pc);
131
132
#define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
133
134
diff --git a/target/arm/internals.h b/target/arm/internals.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/target/arm/internals.h
137
+++ b/target/arm/internals.h
138
@@ -XXX,XX +XXX,XX @@ void init_cpreg_list(ARMCPU *cpu);
139
140
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
141
void arm_translate_init(void);
142
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
143
+ int *max_insns, vaddr pc, void *host_pc);
144
145
void arm_cpu_register_gdb_commands(ARMCPU *cpu);
146
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
147
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
148
index XXXXXXX..XXXXXXX 100644
149
--- a/target/avr/cpu.h
150
+++ b/target/avr/cpu.h
151
@@ -XXX,XX +XXX,XX @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
152
}
153
154
void avr_cpu_tcg_init(void);
155
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
156
+ int *max_insns, vaddr pc, void *host_pc);
157
158
int cpu_avr_exec(CPUState *cpu);
159
160
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
161
index XXXXXXX..XXXXXXX 100644
162
--- a/target/hexagon/cpu.h
163
+++ b/target/hexagon/cpu.h
164
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
165
typedef HexagonCPU ArchCPU;
166
167
void hexagon_translate_init(void);
168
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
169
+ int *max_insns, vaddr pc, void *host_pc);
170
171
#include "exec/cpu-all.h"
172
173
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/hppa/cpu.h
176
+++ b/target/hppa/cpu.h
177
@@ -XXX,XX +XXX,XX @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
178
}
179
180
void hppa_translate_init(void);
181
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
182
+ int *max_insns, vaddr pc, void *host_pc);
183
184
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
185
186
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
187
index XXXXXXX..XXXXXXX 100644
188
--- a/target/i386/tcg/helper-tcg.h
189
+++ b/target/i386/tcg/helper-tcg.h
190
@@ -XXX,XX +XXX,XX @@ static inline target_long lshift(target_long x, int n)
191
192
/* translate.c */
193
void tcg_x86_init(void);
194
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
195
+ int *max_insns, vaddr pc, void *host_pc);
196
197
/* excp_helper.c */
198
G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
199
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
200
index XXXXXXX..XXXXXXX 100644
201
--- a/target/loongarch/internals.h
202
+++ b/target/loongarch/internals.h
203
@@ -XXX,XX +XXX,XX @@
204
#define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)
205
206
void loongarch_translate_init(void);
207
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
208
+ int *max_insns, vaddr pc, void *host_pc);
209
210
void G_NORETURN do_raise_exception(CPULoongArchState *env,
211
uint32_t exception,
212
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
213
index XXXXXXX..XXXXXXX 100644
214
--- a/target/m68k/cpu.h
215
+++ b/target/m68k/cpu.h
216
@@ -XXX,XX +XXX,XX @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
217
int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
218
219
void m68k_tcg_init(void);
220
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
221
+ int *max_insns, vaddr pc, void *host_pc);
222
void m68k_cpu_init_gdb(M68kCPU *cpu);
223
uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
224
void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
225
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
226
index XXXXXXX..XXXXXXX 100644
227
--- a/target/microblaze/cpu.h
228
+++ b/target/microblaze/cpu.h
229
@@ -XXX,XX +XXX,XX @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
230
}
231
232
void mb_tcg_init(void);
233
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
234
+ int *max_insns, vaddr pc, void *host_pc);
235
236
#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU
237
238
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
239
index XXXXXXX..XXXXXXX 100644
240
--- a/target/mips/tcg/tcg-internal.h
241
+++ b/target/mips/tcg/tcg-internal.h
242
@@ -XXX,XX +XXX,XX @@
243
#include "cpu.h"
244
245
void mips_tcg_init(void);
246
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
247
+ int *max_insns, vaddr pc, void *host_pc);
248
249
void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
250
G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
251
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
252
index XXXXXXX..XXXXXXX 100644
253
--- a/target/openrisc/cpu.h
254
+++ b/target/openrisc/cpu.h
255
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
256
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
257
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
258
void openrisc_translate_init(void);
259
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
260
+ int *max_insns, vaddr pc, void *host_pc);
261
int print_insn_or1k(bfd_vma addr, disassemble_info *info);
262
263
#ifndef CONFIG_USER_ONLY
264
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
265
index XXXXXXX..XXXXXXX 100644
266
--- a/target/ppc/cpu.h
267
+++ b/target/ppc/cpu.h
268
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ppc_cpu;
269
270
/*****************************************************************************/
271
void ppc_translate_init(void);
272
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
273
+ int *max_insns, vaddr pc, void *host_pc);
274
275
#if !defined(CONFIG_USER_ONLY)
276
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
277
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
278
index XXXXXXX..XXXXXXX 100644
279
--- a/target/riscv/cpu.h
280
+++ b/target/riscv/cpu.h
281
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
282
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
283
284
void riscv_translate_init(void);
285
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
286
+ int *max_insns, vaddr pc, void *host_pc);
287
+
288
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
289
uint32_t exception, uintptr_t pc);
290
291
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
292
index XXXXXXX..XXXXXXX 100644
293
--- a/target/rx/cpu.h
294
+++ b/target/rx/cpu.h
295
@@ -XXX,XX +XXX,XX @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
296
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
297
298
void rx_translate_init(void);
299
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
300
+ int *max_insns, vaddr pc, void *host_pc);
301
void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
302
303
#include "exec/cpu-all.h"
304
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
305
index XXXXXXX..XXXXXXX 100644
306
--- a/target/s390x/s390x-internal.h
307
+++ b/target/s390x/s390x-internal.h
308
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
309
310
/* translate.c */
311
void s390x_translate_init(void);
312
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
313
+ int *max_insns, vaddr pc, void *host_pc);
314
void s390x_restore_state_to_opc(CPUState *cs,
315
const TranslationBlock *tb,
316
const uint64_t *data);
317
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
318
index XXXXXXX..XXXXXXX 100644
319
--- a/target/sh4/cpu.h
320
+++ b/target/sh4/cpu.h
321
@@ -XXX,XX +XXX,XX @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
322
uintptr_t retaddr);
323
324
void sh4_translate_init(void);
325
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
326
+ int *max_insns, vaddr pc, void *host_pc);
327
328
#if !defined(CONFIG_USER_ONLY)
329
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
330
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
331
index XXXXXXX..XXXXXXX 100644
332
--- a/target/sparc/cpu.h
333
+++ b/target/sparc/cpu.h
334
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
335
336
/* translate.c */
337
void sparc_tcg_init(void);
338
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
339
+ int *max_insns, vaddr pc, void *host_pc);
340
341
/* fop_helper.c */
342
target_ulong cpu_get_fsr(CPUSPARCState *);
343
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
344
index XXXXXXX..XXXXXXX 100644
345
--- a/target/tricore/cpu.h
346
+++ b/target/tricore/cpu.h
347
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)
348
349
void cpu_state_reset(CPUTriCoreState *s);
350
void tricore_tcg_init(void);
351
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
352
+ int *max_insns, vaddr pc, void *host_pc);
353
354
static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
355
uint64_t *cs_base, uint32_t *flags)
356
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
357
index XXXXXXX..XXXXXXX 100644
358
--- a/target/xtensa/cpu.h
359
+++ b/target/xtensa/cpu.h
360
@@ -XXX,XX +XXX,XX @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
361
362
void xtensa_collect_sr_names(const XtensaConfig *config);
363
void xtensa_translate_init(void);
364
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
365
+ int *max_insns, vaddr pc, void *host_pc);
366
void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
367
void xtensa_breakpoint_handler(CPUState *cs);
368
void xtensa_register_core(XtensaConfigList *node);
369
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
370
index XXXXXXX..XXXXXXX 100644
371
--- a/accel/tcg/cpu-exec.c
372
+++ b/accel/tcg/cpu-exec.c
373
@@ -XXX,XX +XXX,XX @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
374
375
if (!tcg_target_initialized) {
376
/* Check mandatory TCGCPUOps handlers */
377
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
378
#ifndef CONFIG_USER_ONLY
379
- assert(cpu->cc->tcg_ops->cpu_exec_halt);
380
- assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
381
+ assert(tcg_ops->cpu_exec_halt);
382
+ assert(tcg_ops->cpu_exec_interrupt);
383
#endif /* !CONFIG_USER_ONLY */
384
- cpu->cc->tcg_ops->initialize();
385
+ assert(tcg_ops->translate_code);
386
+ tcg_ops->initialize();
387
tcg_target_initialized = true;
388
}
389
390
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
391
index XXXXXXX..XXXXXXX 100644
392
--- a/accel/tcg/translate-all.c
393
+++ b/accel/tcg/translate-all.c
394
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
395
396
tcg_func_start(tcg_ctx);
397
398
- tcg_ctx->cpu = env_cpu(env);
399
- gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
400
+ CPUState *cs = env_cpu(env);
401
+ tcg_ctx->cpu = cs;
402
+ cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);
403
+
404
assert(tb->size != 0);
405
tcg_ctx->cpu = NULL;
406
*max_insns = tb->icount;
407
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
408
/*
409
* Overflow of code_gen_buffer, or the current slice of it.
410
*
411
- * TODO: We don't need to re-do gen_intermediate_code, nor
412
+ * TODO: We don't need to re-do tcg_ops->translate_code, nor
413
* should we re-do the tcg optimization currently hidden
414
* inside tcg_gen_code. All that should be required is to
415
* flush the TBs, allocate a new TB, re-initialize it per
416
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
417
index XXXXXXX..XXXXXXX 100644
418
--- a/target/alpha/cpu.c
419
+++ b/target/alpha/cpu.c
420
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
421
422
static const TCGCPUOps alpha_tcg_ops = {
423
.initialize = alpha_translate_init,
424
+ .translate_code = alpha_translate_code,
425
.synchronize_from_tb = alpha_cpu_synchronize_from_tb,
426
.restore_state_to_opc = alpha_restore_state_to_opc,
427
428
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
429
index XXXXXXX..XXXXXXX 100644
430
--- a/target/alpha/translate.c
431
+++ b/target/alpha/translate.c
432
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
433
.tb_stop = alpha_tr_tb_stop,
434
};
435
436
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
437
- vaddr pc, void *host_pc)
438
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
439
+ int *max_insns, vaddr pc, void *host_pc)
440
{
441
DisasContext dc;
442
translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
443
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
444
index XXXXXXX..XXXXXXX 100644
445
--- a/target/arm/cpu.c
446
+++ b/target/arm/cpu.c
447
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps arm_sysemu_ops = {
448
#ifdef CONFIG_TCG
449
static const TCGCPUOps arm_tcg_ops = {
450
.initialize = arm_translate_init,
451
+ .translate_code = arm_translate_code,
452
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
453
.debug_excp_handler = arm_debug_excp_handler,
454
.restore_state_to_opc = arm_restore_state_to_opc,
455
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
456
index XXXXXXX..XXXXXXX 100644
457
--- a/target/arm/tcg/cpu-v7m.c
458
+++ b/target/arm/tcg/cpu-v7m.c
459
@@ -XXX,XX +XXX,XX @@ static void cortex_m55_initfn(Object *obj)
460
461
static const TCGCPUOps arm_v7m_tcg_ops = {
462
.initialize = arm_translate_init,
463
+ .translate_code = arm_translate_code,
464
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
465
.debug_excp_handler = arm_debug_excp_handler,
466
.restore_state_to_opc = arm_restore_state_to_opc,
467
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
468
index XXXXXXX..XXXXXXX 100644
469
--- a/target/arm/tcg/translate.c
470
+++ b/target/arm/tcg/translate.c
471
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
472
.tb_stop = arm_tr_tb_stop,
473
};
474
475
-/* generate intermediate code for basic block 'tb'. */
476
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
477
- vaddr pc, void *host_pc)
478
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
479
+ int *max_insns, vaddr pc, void *host_pc)
480
{
481
DisasContext dc = { };
482
const TranslatorOps *ops = &arm_translator_ops;
483
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
484
index XXXXXXX..XXXXXXX 100644
485
--- a/target/avr/cpu.c
486
+++ b/target/avr/cpu.c
487
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {
488
489
static const TCGCPUOps avr_tcg_ops = {
490
.initialize = avr_cpu_tcg_init,
491
+ .translate_code = avr_cpu_translate_code,
492
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
493
.restore_state_to_opc = avr_restore_state_to_opc,
494
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
495
diff --git a/target/avr/translate.c b/target/avr/translate.c
496
index XXXXXXX..XXXXXXX 100644
497
--- a/target/avr/translate.c
498
+++ b/target/avr/translate.c
499
@@ -XXX,XX +XXX,XX @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
500
*
501
* - translate()
502
* - canonicalize_skip()
503
- * - gen_intermediate_code()
504
+ * - translate_code()
505
* - restore_state_to_opc()
506
*
507
*/
508
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
509
.tb_stop = avr_tr_tb_stop,
510
};
511
512
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
513
- vaddr pc, void *host_pc)
514
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
515
+ int *max_insns, vaddr pc, void *host_pc)
516
{
517
DisasContext dc = { };
518
translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
519
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
520
index XXXXXXX..XXXXXXX 100644
521
--- a/target/hexagon/cpu.c
522
+++ b/target/hexagon/cpu.c
523
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)
524
525
static const TCGCPUOps hexagon_tcg_ops = {
526
.initialize = hexagon_translate_init,
527
+ .translate_code = hexagon_translate_code,
528
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
529
.restore_state_to_opc = hexagon_restore_state_to_opc,
530
};
531
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
532
index XXXXXXX..XXXXXXX 100644
533
--- a/target/hexagon/translate.c
534
+++ b/target/hexagon/translate.c
535
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
536
.tb_stop = hexagon_tr_tb_stop,
537
};
538
539
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
540
- vaddr pc, void *host_pc)
541
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
542
+ int *max_insns, vaddr pc, void *host_pc)
543
{
544
DisasContext ctx;
545
546
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
547
index XXXXXXX..XXXXXXX 100644
548
--- a/target/hppa/cpu.c
549
+++ b/target/hppa/cpu.c
550
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
551
552
static const TCGCPUOps hppa_tcg_ops = {
553
.initialize = hppa_translate_init,
554
+ .translate_code = hppa_translate_code,
555
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
556
.restore_state_to_opc = hppa_restore_state_to_opc,
557
558
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
559
index XXXXXXX..XXXXXXX 100644
560
--- a/target/hppa/translate.c
561
+++ b/target/hppa/translate.c
562
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
563
#endif
564
};
565
566
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
567
- vaddr pc, void *host_pc)
568
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
569
+ int *max_insns, vaddr pc, void *host_pc)
570
{
571
DisasContext ctx = { };
572
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
573
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
574
index XXXXXXX..XXXXXXX 100644
575
--- a/target/i386/tcg/tcg-cpu.c
576
+++ b/target/i386/tcg/tcg-cpu.c
577
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)
578
579
static const TCGCPUOps x86_tcg_ops = {
580
.initialize = tcg_x86_init,
581
+ .translate_code = x86_translate_code,
582
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
583
.restore_state_to_opc = x86_restore_state_to_opc,
584
.cpu_exec_enter = x86_cpu_exec_enter,
585
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
586
index XXXXXXX..XXXXXXX 100644
587
--- a/target/i386/tcg/translate.c
588
+++ b/target/i386/tcg/translate.c
589
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
590
.tb_stop = i386_tr_tb_stop,
591
};
592
593
-/* generate intermediate code for basic block 'tb'. */
594
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
595
- vaddr pc, void *host_pc)
596
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
597
+ int *max_insns, vaddr pc, void *host_pc)
598
{
599
DisasContext dc;
600
601
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
602
index XXXXXXX..XXXXXXX 100644
603
--- a/target/loongarch/cpu.c
604
+++ b/target/loongarch/cpu.c
605
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
606
607
static const TCGCPUOps loongarch_tcg_ops = {
608
.initialize = loongarch_translate_init,
609
+ .translate_code = loongarch_translate_code,
610
.synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
611
.restore_state_to_opc = loongarch_restore_state_to_opc,
612
613
diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
614
index XXXXXXX..XXXXXXX 100644
615
--- a/target/loongarch/tcg/translate.c
616
+++ b/target/loongarch/tcg/translate.c
617
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
618
.tb_stop = loongarch_tr_tb_stop,
619
};
620
621
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
622
- vaddr pc, void *host_pc)
623
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
624
+ int *max_insns, vaddr pc, void *host_pc)
625
{
626
DisasContext ctx;
627
628
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
629
index XXXXXXX..XXXXXXX 100644
630
--- a/target/m68k/cpu.c
631
+++ b/target/m68k/cpu.c
632
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {
633
634
static const TCGCPUOps m68k_tcg_ops = {
635
.initialize = m68k_tcg_init,
636
+ .translate_code = m68k_translate_code,
637
.restore_state_to_opc = m68k_restore_state_to_opc,
638
639
#ifndef CONFIG_USER_ONLY
7
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
640
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
8
index XXXXXXX..XXXXXXX 100644
641
index XXXXXXX..XXXXXXX 100644
9
--- a/target/m68k/translate.c
642
--- a/target/m68k/translate.c
10
+++ b/target/m68k/translate.c
643
+++ b/target/m68k/translate.c
11
@@ -XXX,XX +XXX,XX @@ static inline int ext_opsize(int ext, int pos)
644
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
12
*/
645
.tb_stop = m68k_tr_tb_stop,
13
static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
646
};
14
{
647
15
- TCGv tmp;
648
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
16
switch (opsize) {
649
- vaddr pc, void *host_pc)
17
case OS_BYTE:
650
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
18
- tcg_gen_andi_i32(reg, reg, 0xffffff00);
651
+ int *max_insns, vaddr pc, void *host_pc)
19
- tmp = tcg_temp_new();
652
{
20
- tcg_gen_ext8u_i32(tmp, val);
653
DisasContext dc;
21
- tcg_gen_or_i32(reg, reg, tmp);
654
translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
22
+ tcg_gen_deposit_i32(reg, reg, val, 0, 8);
655
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
23
break;
656
index XXXXXXX..XXXXXXX 100644
24
case OS_WORD:
657
--- a/target/microblaze/cpu.c
25
- tcg_gen_andi_i32(reg, reg, 0xffff0000);
658
+++ b/target/microblaze/cpu.c
26
- tmp = tcg_temp_new();
659
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {
27
- tcg_gen_ext16u_i32(tmp, val);
660
28
- tcg_gen_or_i32(reg, reg, tmp);
661
static const TCGCPUOps mb_tcg_ops = {
29
+ tcg_gen_deposit_i32(reg, reg, val, 0, 16);
662
.initialize = mb_tcg_init,
30
break;
663
+ .translate_code = mb_translate_code,
31
case OS_LONG:
664
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
32
case OS_SINGLE:
665
.restore_state_to_opc = mb_restore_state_to_opc,
666
667
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
668
index XXXXXXX..XXXXXXX 100644
669
--- a/target/microblaze/translate.c
670
+++ b/target/microblaze/translate.c
671
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
672
.tb_stop = mb_tr_tb_stop,
673
};
674
675
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
676
- vaddr pc, void *host_pc)
677
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
678
+ int *max_insns, vaddr pc, void *host_pc)
679
{
680
DisasContext dc;
681
translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
682
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
683
index XXXXXXX..XXXXXXX 100644
684
--- a/target/mips/cpu.c
685
+++ b/target/mips/cpu.c
686
@@ -XXX,XX +XXX,XX @@ static const Property mips_cpu_properties[] = {
687
#include "hw/core/tcg-cpu-ops.h"
688
static const TCGCPUOps mips_tcg_ops = {
689
.initialize = mips_tcg_init,
690
+ .translate_code = mips_translate_code,
691
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
692
.restore_state_to_opc = mips_restore_state_to_opc,
693
694
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
695
index XXXXXXX..XXXXXXX 100644
696
--- a/target/mips/tcg/translate.c
697
+++ b/target/mips/tcg/translate.c
698
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
699
.tb_stop = mips_tr_tb_stop,
700
};
701
702
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
703
- vaddr pc, void *host_pc)
704
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
705
+ int *max_insns, vaddr pc, void *host_pc)
706
{
707
DisasContext ctx;
708
709
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {

 static const TCGCPUOps openrisc_tcg_ops = {
     .initialize = openrisc_translate_init,
+    .translate_code = openrisc_translate_code,
     .synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
     .restore_state_to_opc = openrisc_restore_state_to_opc,

diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
     .tb_stop = openrisc_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+                             int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;

diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {

 static const TCGCPUOps ppc_tcg_ops = {
     .initialize = ppc_translate_init,
+    .translate_code = ppc_translate_code,
     .restore_state_to_opc = ppc_restore_state_to_opc,

 #ifdef CONFIG_USER_ONLY
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
     .tb_stop = ppc_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;

diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_restore_state_to_opc(CPUState *cs,

 static const TCGCPUOps riscv_tcg_ops = {
     .initialize = riscv_translate_init,
+    .translate_code = riscv_translate_code,
     .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
     .restore_state_to_opc = riscv_restore_state_to_opc,

diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
     .tb_stop = riscv_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;

diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {

 static const TCGCPUOps rx_tcg_ops = {
     .initialize = rx_translate_init,
+    .translate_code = rx_translate_code,
     .synchronize_from_tb = rx_cpu_synchronize_from_tb,
     .restore_state_to_opc = rx_restore_state_to_opc,
     .tlb_fill = rx_cpu_tlb_fill,
diff --git a/target/rx/translate.c b/target/rx/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
     .tb_stop = rx_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+                       int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc;

diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,

 static const TCGCPUOps s390_tcg_ops = {
     .initialize = s390x_translate_init,
+    .translate_code = s390x_translate_code,
     .restore_state_to_opc = s390x_restore_state_to_opc,

 #ifdef CONFIG_USER_ONLY
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
     .disas_log = s390x_tr_disas_log,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc;

diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {

 static const TCGCPUOps superh_tcg_ops = {
     .initialize = sh4_translate_init,
+    .translate_code = sh4_translate_code,
     .synchronize_from_tb = superh_cpu_synchronize_from_tb,
     .restore_state_to_opc = superh_restore_state_to_opc,

diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
     .tb_stop = sh4_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;

diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {

 static const TCGCPUOps sparc_tcg_ops = {
     .initialize = sparc_tcg_init,
+    .translate_code = sparc_translate_code,
     .synchronize_from_tb = sparc_cpu_synchronize_from_tb,
     .restore_state_to_opc = sparc_restore_state_to_opc,

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
     .tb_stop = sparc_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc = {};

diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {

 static const TCGCPUOps tricore_tcg_ops = {
     .initialize = tricore_tcg_init,
+    .translate_code = tricore_translate_code,
     .synchronize_from_tb = tricore_cpu_synchronize_from_tb,
     .restore_state_to_opc = tricore_restore_state_to_opc,
     .tlb_fill = tricore_cpu_tlb_fill,
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
     .tb_stop = tricore_tr_tb_stop,
 };

-
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;
     translator_loop(cs, tb, max_insns, pc, host_pc,
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {

 static const TCGCPUOps xtensa_tcg_ops = {
     .initialize = xtensa_translate_init,
+    .translate_code = xtensa_translate_code,
     .debug_excp_handler = xtensa_breakpoint_handler,
     .restore_state_to_opc = xtensa_restore_state_to_opc,

diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
     .tb_stop = xtensa_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
+                           int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc = {};
     translator_loop(cpu, tb, max_insns, pc, host_pc,
--
2.34.1

--
2.43.0