The following changes since commit aa3a285b5bc56a4208b3b57d4a55291e9c260107:

  Merge tag 'mem-2024-12-21' of https://github.com/davidhildenbrand/qemu into staging (2024-12-22 14:33:27 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241224

for you to fetch changes up to e4a8e093dc74be049f4829831dce76e5edab0003:

  accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core (2024-12-24 08:32:15 -0800)

----------------------------------------------------------------
tcg/optimize: Remove in-flight mask data from OptContext
fpu: Add float*_muladd_scalbn
fpu: Remove float_muladd_halve_result
fpu: Add float_round_nearest_even_max
fpu: Add float_muladd_suppress_add_product_zero
target/hexagon: Use float32_muladd
accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

----------------------------------------------------------------
Ilya Leoshkevich (1):
      tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

Pierrick Bouvier (1):
      plugins: optimize cpu_index code generation

Richard Henderson (70):
      tcg/optimize: Split out finish_bb, finish_ebb
      tcg/optimize: Split out fold_affected_mask
      tcg/optimize: Copy mask writeback to fold_masks
      tcg/optimize: Split out fold_masks_zs
      tcg/optimize: Augment s_mask from z_mask in fold_masks_zs
      tcg/optimize: Change representation of s_mask
      tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2
      tcg/optimize: Introduce const value accessors for TempOptInfo
      tcg/optimize: Use fold_masks_zs in fold_and
      tcg/optimize: Use fold_masks_zs in fold_andc
      tcg/optimize: Use fold_masks_zs in fold_bswap
      tcg/optimize: Use fold_masks_zs in fold_count_zeros
      tcg/optimize: Use fold_masks_z in fold_ctpop
      tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
      tcg/optimize: Compute sign mask in fold_deposit
      tcg/optimize: Use finish_folding in fold_divide
      tcg/optimize: Use finish_folding in fold_dup, fold_dup2
      tcg/optimize: Use fold_masks_s in fold_eqv
      tcg/optimize: Use fold_masks_z in fold_extract
      tcg/optimize: Use finish_folding in fold_extract2
      tcg/optimize: Use fold_masks_zs in fold_exts
      tcg/optimize: Use fold_masks_z in fold_extu
      tcg/optimize: Use fold_masks_zs in fold_movcond
      tcg/optimize: Use finish_folding in fold_mul*
      tcg/optimize: Use fold_masks_s in fold_nand
      tcg/optimize: Use fold_masks_z in fold_neg_no_const
      tcg/optimize: Use fold_masks_s in fold_nor
      tcg/optimize: Use fold_masks_s in fold_not
      tcg/optimize: Use fold_masks_zs in fold_or
      tcg/optimize: Use fold_masks_zs in fold_orc
      tcg/optimize: Use fold_masks_zs in fold_qemu_ld
      tcg/optimize: Return true from fold_qemu_st, fold_tcg_st
      tcg/optimize: Use finish_folding in fold_remainder
      tcg/optimize: Distinguish simplification in fold_setcond_zmask
      tcg/optimize: Use fold_masks_z in fold_setcond
      tcg/optimize: Use fold_masks_s in fold_negsetcond
      tcg/optimize: Use fold_masks_z in fold_setcond2
      tcg/optimize: Use finish_folding in fold_cmp_vec
      tcg/optimize: Use finish_folding in fold_cmpsel_vec
      tcg/optimize: Use fold_masks_zs in fold_sextract
      tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
      tcg/optimize: Simplify sign bit test in fold_shift
      tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec
      tcg/optimize: Use fold_masks_zs in fold_tcg_ld
      tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy
      tcg/optimize: Use fold_masks_zs in fold_xor
      tcg/optimize: Use finish_folding in fold_bitsel_vec
      tcg/optimize: Use finish_folding as default in tcg_optimize
      tcg/optimize: Remove z_mask, s_mask from OptContext
      tcg/optimize: Re-enable sign-mask optimizations
      tcg/optimize: Move fold_bitsel_vec into alphabetic sort
      tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
      softfloat: Add float{16,32,64}_muladd_scalbn
      target/arm: Use float*_muladd_scalbn
      target/sparc: Use float*_muladd_scalbn
      softfloat: Remove float_muladd_halve_result
      softfloat: Add float_round_nearest_even_max
      softfloat: Add float_muladd_suppress_add_product_zero
      target/hexagon: Use float32_mul in helper_sfmpy
      target/hexagon: Use float32_muladd for helper_sffma
      target/hexagon: Use float32_muladd for helper_sffms
      target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc
      target/hexagon: Use float32_muladd for helper_sffm[as]_lib
      target/hexagon: Remove internal_fmafx
      target/hexagon: Expand GEN_XF_ROUND
      target/hexagon: Remove Float
      target/hexagon: Remove Double
      target/hexagon: Use mulu64 for int128_mul_6464
      target/hexagon: Simplify internal_mpyhh setup
      accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

 include/exec/translator.h | 14 -
 include/fpu/softfloat-types.h | 2 +
 include/fpu/softfloat.h | 14 +-
 include/hw/core/tcg-cpu-ops.h | 13 +
 target/alpha/cpu.h | 2 +
 target/arm/internals.h | 2 +
 target/avr/cpu.h | 2 +
 target/hexagon/cpu.h | 2 +
 target/hexagon/fma_emu.h | 3 -
 target/hppa/cpu.h | 2 +
 target/i386/tcg/helper-tcg.h | 2 +
 target/loongarch/internals.h | 2 +
 target/m68k/cpu.h | 2 +
 target/microblaze/cpu.h | 2 +
 target/mips/tcg/tcg-internal.h | 2 +
 target/openrisc/cpu.h | 2 +
 target/ppc/cpu.h | 2 +
 target/riscv/cpu.h | 3 +
 target/rx/cpu.h | 2 +
 target/s390x/s390x-internal.h | 2 +
 target/sh4/cpu.h | 2 +
 target/sparc/cpu.h | 2 +
 target/sparc/helper.h | 4 +-
 target/tricore/cpu.h | 2 +
 target/xtensa/cpu.h | 2 +
 accel/tcg/cpu-exec.c | 8 +-
 accel/tcg/plugin-gen.c | 9 +
 accel/tcg/translate-all.c | 8 +-
 fpu/softfloat.c | 63 +--
 target/alpha/cpu.c | 1 +
 target/alpha/translate.c | 4 +-
 target/arm/cpu.c | 1 +
 target/arm/tcg/cpu-v7m.c | 1 +
 target/arm/tcg/helper-a64.c | 6 +-
 target/arm/tcg/translate.c | 5 +-
 target/avr/cpu.c | 1 +
 target/avr/translate.c | 6 +-
 target/hexagon/cpu.c | 1 +
 target/hexagon/fma_emu.c | 496 ++++++---------------
 target/hexagon/op_helper.c | 125 ++----
 target/hexagon/translate.c | 4 +-
 target/hppa/cpu.c | 1 +
 target/hppa/translate.c | 4 +-
 target/i386/tcg/tcg-cpu.c | 1 +
 target/i386/tcg/translate.c | 5 +-
 target/loongarch/cpu.c | 1 +
 target/loongarch/tcg/translate.c | 4 +-
 target/m68k/cpu.c | 1 +
 target/m68k/translate.c | 4 +-
 target/microblaze/cpu.c | 1 +
 target/microblaze/translate.c | 4 +-
 target/mips/cpu.c | 1 +
 target/mips/tcg/translate.c | 4 +-
 target/openrisc/cpu.c | 1 +
 target/openrisc/translate.c | 4 +-
 target/ppc/cpu_init.c | 1 +
 target/ppc/translate.c | 4 +-
 target/riscv/tcg/tcg-cpu.c | 1 +
 target/riscv/translate.c | 4 +-
 target/rx/cpu.c | 1 +
 target/rx/translate.c | 4 +-
 target/s390x/cpu.c | 1 +
 target/s390x/tcg/translate.c | 4 +-
 target/sh4/cpu.c | 1 +
 target/sh4/translate.c | 4 +-
 target/sparc/cpu.c | 1 +
 target/sparc/fop_helper.c | 8 +-
 target/sparc/translate.c | 84 ++--
 target/tricore/cpu.c | 1 +
 target/tricore/translate.c | 5 +-
 target/xtensa/cpu.c | 1 +
 target/xtensa/translate.c | 4 +-
 tcg/optimize.c | 857 +++++++++++++++++++-----------------
 tests/tcg/multiarch/system/memory.c | 9 +-
 fpu/softfloat-parts.c.inc | 16 +-
 75 files changed, 866 insertions(+), 1009 deletions(-)
From: Ilya Leoshkevich <iii@linux.ibm.com>

make check-tcg fails on Fedora with the following error message:

  alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
  qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
     17 | #include <inttypes.h>
        |          ^~~~~~~~~~~~
  compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tests/tcg/multiarch/system/memory.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -XXX,XX +XXX,XX @@
 
 #include <stdint.h>
 #include <stdbool.h>
-#include <inttypes.h>
 #include <minilib.h>
 
 #ifndef CHECK_UNALIGNED
@@ -XXX,XX +XXX,XX @@ int main(void)
     int i;
     bool ok = true;
 
-    ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
-    ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+    ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+    ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);
 
     /* Run through the unsigned tests first */
     for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -XXX,XX +XXX,XX @@ int main(void)
         ok = do_signed_reads(true);
     }
 
-    ml_printf("Test data read: %"PRId32"\n", test_read_count);
-    ml_printf("Test data write: %"PRId32"\n", test_write_count);
+    ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+    ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
     ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
     return ok ? 0 : -1;
 }
--
2.43.0
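The fix keeps the test self-contained: where a fixed-width value needs printing without <inttypes.h>, it is cast to unsigned long, whose printf specifiers are always defined. A minimal sketch of the same idiom outside this test (hypothetical values, and assuming a hosted <stdio.h> rather than minilib):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t count = 42;
        int buf[16];
        /* %lx / %lu are defined for unsigned long everywhere,
         * so no PRIxPTR / PRId32 macros are needed. */
        printf("buffer at 0x%lx, count %lu\n",
               (unsigned long)&buf[0], (unsigned long)count);
        return 0;
    }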
From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
 accel/tcg/plugin-gen.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void gen_disable_mem_helper(void)
 
 static TCGv_i32 gen_cpu_index(void)
 {
+    /*
+     * Optimize when we run with a single vcpu. All values using cpu_index,
+     * including scoreboard index, will be optimized out.
+     * User-mode calls tb_flush when setting this flag. In system-mode, all
+     * vcpus are created before generating code.
+     */
+    if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+        return tcg_constant_i32(current_cpu->cpu_index);
+    }
     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
     tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
--
2.43.0
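The win comes from constant propagation through every expression that consumes cpu_index. An informal sketch of the difference for a per-vcpu scoreboard access (pseudo-TCG written as comments, for illustration only -- not actual plugin-gen output):

    /* cpu_index loaded at runtime (CF_PARALLEL set):
     *   ld_i32  t0, env, #cpu_index_offset
     *   mul_i32 t1, t0, #entry_size
     *   add_ptr t2, scoreboard_base, t1
     *
     * cpu_index known constant (single vcpu, CF_PARALLEL clear):
     *   the load disappears and the multiply/add fold away, leaving a
     *   constant scoreboard address computed at translation time. */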
Call them directly from the opcode switch statement in tcg_optimize,
rather than in finish_folding based on opcode flags. Adjust folding
of conditional branches to match.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 47 +++++++++++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
     }
 }
 
+static void finish_bb(OptContext *ctx)
+{
+    /* We only optimize memory barriers across basic blocks. */
+    ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+    finish_bb(ctx);
+    /* We only optimize across extended basic blocks. */
+    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+    remove_mem_copy_all(ctx);
+}
+
 static void finish_folding(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     int i, nb_oargs;
 
-    /*
-     * We only optimize extended basic blocks.  If the opcode ends a BB
-     * and is not a conditional branch, reset all temp data.
-     */
-    if (def->flags & TCG_OPF_BB_END) {
-        ctx->prev_mb = NULL;
-        if (!(def->flags & TCG_OPF_COND_BRANCH)) {
-            memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
-            remove_mem_copy_all(ctx);
-        }
-        return;
-    }
-
     nb_oargs = def->nb_oargs;
     for (i = 0; i < nb_oargs; i++) {
         TCGTemp *ts = arg_temp(op->args[i]);
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
     if (i > 0) {
         op->opc = INDEX_op_br;
         op->args[0] = op->args[3];
+        finish_ebb(ctx);
+    } else {
+        finish_bb(ctx);
     }
-    return false;
+    return true;
 }
 
 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
     }
     op->opc = INDEX_op_br;
     op->args[0] = label;
-    break;
+    finish_ebb(ctx);
+    return true;
     }
-    return false;
+
+    finish_bb(ctx);
+    return true;
 }
 
 static bool fold_bswap(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(xor):
             done = fold_xor(&ctx, op);
             break;
+        case INDEX_op_set_label:
+        case INDEX_op_br:
+        case INDEX_op_exit_tb:
+        case INDEX_op_goto_tb:
+        case INDEX_op_goto_ptr:
+            finish_ebb(&ctx);
+            done = true;
+            break;
         default:
             break;
         }
--
2.43.0
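In short, the two helpers encode different reset scopes (an informal summary of the patch above, not code from it):

    /* finish_bb():  end of a basic block, e.g. a conditional branch.
     *               Only the pending memory-barrier state is dropped;
     *               known values of temps survive into the fall-through.
     * finish_ebb(): end of an extended basic block, e.g. an unconditional
     *               br, exit_tb or goto_tb.  All per-temp knowledge and
     *               tracked memory copies are reset as well. */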
There are only a few logical operations which can compute
an "affected" mask. Split out handling of this optimization
to a separate function, only to be called when applicable.

Remove the a_mask field from OptContext, as the mask is
no longer stored anywhere.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
 
     /* In flight values from optimization. */
-    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
     uint64_t s_mask;  /* mask of clrsb(value) bits */
     TCGType type;
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
 
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
-    uint64_t a_mask = ctx->a_mask;
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
 
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
      * type changing opcodes.
      */
     if (ctx->type == TCG_TYPE_I32) {
-        a_mask = (int32_t)a_mask;
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
         ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
+    return false;
+}
+
+/*
+ * An "affected" mask bit is 0 if and only if the result is identical
+ * to the first input.  Thus if the entire mask is 0, the operation
+ * is equivalent to a copy.
+ */
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
+{
+    if (ctx->type == TCG_TYPE_I32) {
+        a_mask = (uint32_t)a_mask;
+    }
     if (a_mask == 0) {
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
      */
-    if (arg_is_const(op->args[2])) {
-        ctx->a_mask = z1 & ~z2;
+    if (arg_is_const(op->args[2]) &&
+        fold_affected_mask(ctx, op, z1 & ~z2)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
      */
     if (arg_is_const(op->args[2])) {
         uint64_t z2 = ~arg_info(op->args[2])->z_mask;
-        ctx->a_mask = z1 & ~z2;
+        if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+            return true;
+        }
         z1 &= z2;
     }
     ctx->z_mask = z1;
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
 
     z_mask_old = arg_info(op->args[1])->z_mask;
     z_mask = extract64(z_mask_old, pos, len);
-    if (pos == 0) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
-    if (!type_change) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;
 
-    if (pos == 0) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
     }
 
     /* Assume all bits affected, no bits known zero, no sign reps. */
-    ctx.a_mask = -1;
     ctx.z_mask = -1;
     ctx.s_mask = 0;
 
--
2.43.0
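A concrete illustration of the "affected" mask computed above, for AND with a constant (made-up masks, for illustration only):

    /* arg1 is known to fit in 8 bits:              z1 = 0x00ff
     * arg2 is the constant 0x0f:                   z2 = 0x000f
     * bits AND could actually clear in the result: a_mask = z1 & ~z2 = 0x00f0
     *
     * If arg2 were 0xff instead, a_mask would be 0: no bit of arg1 can
     * change, so fold_affected_mask() replaces the AND with a mov. */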
Use of fold_masks should be restricted to those opcodes that
can reliably make use of it -- those with a single output,
and from higher-level folders that set up the masks.
Prepare for conversion of each folder in turn.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
+    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    TCGTemp *ts;
+    TempOptInfo *ti;
+
+    /* Only single-output opcodes are supported here. */
+    tcg_debug_assert(def->nb_oargs == 1);
 
     /*
      * 32-bit ops generate 32-bit results, which for the purpose of
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
-        ctx->z_mask = z_mask;
-        ctx->s_mask = s_mask;
     }
 
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
-    return false;
+
+    ts = arg_temp(op->args[0]);
+    reset_ts(ctx, ts);
+
+    ti = ts_info(ts);
+    ti->z_mask = z_mask;
+    ti->s_mask = s_mask;
+    return true;
 }
 
 /*
--
2.43.0
Add a routine to which masks can be passed directly, rather than
storing them into OptContext. To be used in upcoming patches.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
-static bool fold_masks(OptContext *ctx, TCGOp *op)
+/*
+ * Record "zero" and "sign" masks for the single output of @op.
+ * See TempOptInfo definition of z_mask and s_mask.
+ * If z_mask allows, fold the output to constant zero.
+ */
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
+                          uint64_t z_mask, uint64_t s_mask)
 {
-    uint64_t z_mask = ctx->z_mask;
-    uint64_t s_mask = ctx->s_mask;
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+    return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
+}
+
 /*
  * An "affected" mask bit is 0 if and only if the result is identical
  * to the first input.  Thus if the entire mask is 0, the operation
--
2.43.0
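For reference, the two masks that fold_masks_zs() records, worked through on small examples (illustrative only; note that a later patch in this series, "tcg/optimize: Change representation of s_mask", switches s_mask to "all bits equal to the sign bit, including the sign bit itself"):

    /* Result of ext8u (zero-extend from 8 bits):
     *   z_mask = 0x00000000000000ff   -- only these bits may be nonzero
     * Result of ext8s (sign-extend from 8 bits):
     *   z_mask = all ones             -- any bit may be set
     *   s_mask: under the older convention, the repetitions of the sign
     *           bit above bit 7 (bits 63..8); under the newer convention,
     *           every bit equal to the sign bit, i.e. bits 63..7 (~0x7f). */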
Consider the passed s_mask to be a minimum deduced from
either existing s_mask or from a sign-extension operation.
We may be able to deduce more from the set of known zeros.
Remove identical logic from several opcode folders.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * Record "zero" and "sign" masks for the single output of @op.
  * See TempOptInfo definition of z_mask and s_mask.
  * If z_mask allows, fold the output to constant zero.
+ * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                           uint64_t z_mask, uint64_t s_mask)
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 
     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask;
+    ti->s_mask = s_mask | smask_from_zmask(z_mask);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    s_mask = smask_from_zmask(z_mask);
 
+    s_mask = 0;
     switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
     case TCG_BSWAP_OZ:
         break;
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         /* The high bits are undefined: force all bits above the sign to 1. */
         z_mask |= sign << 1;
-        s_mask = 0;
         break;
     }
     ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
     ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
         return true;
     }
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
 
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
     if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
     int width = 8 * memop_size(mop);
 
     if (width < 64) {
-        ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
-        if (!(mop & MO_SIGN)) {
+        if (mop & MO_SIGN) {
+            ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+        } else {
             ctx->z_mask = MAKE_64BIT_MASK(0, width);
-            ctx->s_mask <<= 1;
         }
     }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
     fold_setcond_tst_pow2(ctx, op, false);
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 
 do_setcond_const:
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
         break;
     CASE_OP_32_64(ld8u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 8);
-        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
         break;
     CASE_OP_32_64(ld16s):
         ctx->s_mask = MAKE_64BIT_MASK(16, 48);
         break;
     CASE_OP_32_64(ld16u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 16);
-        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
         break;
     case INDEX_op_ld32s_i64:
         ctx->s_mask = MAKE_64BIT_MASK(32, 32);
         break;
     case INDEX_op_ld32u_i64:
         ctx->z_mask = MAKE_64BIT_MASK(0, 32);
-        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
         break;
     default:
         g_assert_not_reached();
--
2.43.0

Change the representation from sign bit repetitions to all bits equal
to the sign bit, including the sign bit itself.
3
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
4
The previous format has a problem in that it is difficult to recreate
5
a valid sign mask after a shift operation: the "repetitions" part of
6
the previous format meant that applying the same shift as for the value
7
led to an off-by-one value.
8
9
The new format, including the sign bit itself, means that the sign mask
10
can be manipulated in exactly the same way as the value, and canonicalization
11
is easier.
12
13
Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
14
to do so. Treat 0 as a non-canonical but typeless input for no sign
15
information, which will be reset as appropriate for the data type.
16
We can easily fold in the data from z_mask while canonicalizing.
17
18
Temporarily disable optimizations using s_mask while each operation is
19
converted to use fold_masks_zs and to the new form.
20
21
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
22
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
23
---
7
linux-user/include/host/sparc/host-signal.h | 71 -------------------
24
tcg/optimize.c | 64 ++++++++++++--------------------------------------
8
linux-user/include/host/sparc64/host-signal.h | 64 ++++++++++++++++-
25
1 file changed, 15 insertions(+), 49 deletions(-)
9
2 files changed, 63 insertions(+), 72 deletions(-)
10
delete mode 100644 linux-user/include/host/sparc/host-signal.h
11
26
12
diff --git a/linux-user/include/host/sparc/host-signal.h b/linux-user/include/host/sparc/host-signal.h
27
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
deleted file mode 100644
28
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX
29
--- a/tcg/optimize.c
15
--- a/linux-user/include/host/sparc/host-signal.h
30
+++ b/tcg/optimize.c
16
+++ /dev/null
31
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
17
@@ -XXX,XX +XXX,XX @@
32
QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
18
-/*
33
uint64_t val;
19
- * host-signal.h: signal info dependent on the host architecture
34
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
20
- *
35
- uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
21
- * Copyright (c) 2003-2005 Fabrice Bellard
36
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
22
- * Copyright (c) 2021 Linaro Limited
37
} TempOptInfo;
23
- *
38
24
- * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
39
typedef struct OptContext {
25
- * See the COPYING file in the top-level directory.
40
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
26
- */
41
27
-
42
/* In flight values from optimization. */
28
-#ifndef SPARC_HOST_SIGNAL_H
43
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
29
-#define SPARC_HOST_SIGNAL_H
44
- uint64_t s_mask; /* mask of clrsb(value) bits */
30
-
45
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
31
-/* FIXME: the third argument to a SA_SIGINFO handler is *not* ucontext_t. */
46
TCGType type;
32
-typedef ucontext_t host_sigcontext;
47
} OptContext;
33
-
48
34
-static inline uintptr_t host_signal_pc(host_sigcontext *uc)
49
-/* Calculate the smask for a specific value. */
50
-static uint64_t smask_from_value(uint64_t value)
35
-{
51
-{
36
-#ifdef __arch64__
52
- int rep = clrsb64(value);
37
- return uc->uc_mcontext.mc_gregs[MC_PC];
53
- return ~(~0ull >> rep);
38
-#else
39
- return uc->uc_mcontext.gregs[REG_PC];
40
-#endif
41
-}
54
-}
42
-
55
-
43
-static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
56
-/*
57
- * Calculate the smask for a given set of known-zeros.
58
- * If there are lots of zeros on the left, we can consider the remainder
59
- * an unsigned field, and thus the corresponding signed field is one bit
60
- * larger.
61
- */
62
-static uint64_t smask_from_zmask(uint64_t zmask)
44
-{
63
-{
45
-#ifdef __arch64__
64
- /*
46
- uc->uc_mcontext.mc_gregs[MC_PC] = pc;
65
- * Only the 0 bits are significant for zmask, thus the msb itself
47
-#else
66
- * must be zero, else we have no sign information.
48
- uc->uc_mcontext.gregs[REG_PC] = pc;
67
- */
49
-#endif
68
- int rep = clz64(zmask);
69
- if (rep == 0) {
70
- return 0;
71
- }
72
- rep -= 1;
73
- return ~(~0ull >> rep);
50
-}
74
-}
51
-
75
-
52
-static inline void *host_signal_mask(host_sigcontext *uc)
76
-/*
77
- * Recreate a properly left-aligned smask after manipulation.
78
- * Some bit-shuffling, particularly shifts and rotates, may
79
- * retain sign bits on the left, but may scatter disconnected
80
- * sign bits on the right. Retain only what remains to the left.
81
- */
82
-static uint64_t smask_from_smask(int64_t smask)
53
-{
83
-{
54
- return &uc->uc_sigmask;
84
- /* Only the 1 bits are significant for smask */
85
- return smask_from_zmask(~smask);
55
-}
86
-}
56
-
87
-
57
-static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
88
static inline TempOptInfo *ts_info(TCGTemp *ts)
58
-{
89
{
59
- uint32_t insn = *(uint32_t *)host_signal_pc(uc);
90
return ts->state_ptr;
60
-
91
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
61
- if ((insn >> 30) == 3) {
92
ti->is_const = true;
62
- switch ((insn >> 19) & 0x3f) {
93
ti->val = ts->val;
63
- case 0x05: /* stb */
94
ti->z_mask = ts->val;
64
- case 0x15: /* stba */
95
- ti->s_mask = smask_from_value(ts->val);
65
- case 0x06: /* sth */
96
+ ti->s_mask = INT64_MIN >> clrsb64(ts->val);
66
- case 0x16: /* stha */
97
} else {
67
- case 0x04: /* st */
98
ti->is_const = false;
68
- case 0x14: /* sta */
99
ti->z_mask = -1;
69
- case 0x07: /* std */
100
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
70
- case 0x17: /* stda */
101
*/
71
- case 0x0e: /* stx */
102
if (i == 0) {
72
- case 0x1e: /* stxa */
103
ts_info(ts)->z_mask = ctx->z_mask;
73
- case 0x24: /* stf */
104
- ts_info(ts)->s_mask = ctx->s_mask;
74
- case 0x34: /* stfa */
105
}
75
- case 0x27: /* stdf */
106
}
76
- case 0x37: /* stdfa */
107
}
77
- case 0x26: /* stqf */
108
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
78
- case 0x36: /* stqfa */
109
* The passed s_mask may be augmented by z_mask.
79
- case 0x25: /* stfsr */
110
*/
80
- case 0x3c: /* casa */
111
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
81
- case 0x3e: /* casxa */
112
- uint64_t z_mask, uint64_t s_mask)
82
- return true;
113
+ uint64_t z_mask, int64_t s_mask)
83
- }
114
{
84
- }
115
const TCGOpDef *def = &tcg_op_defs[op->opc];
85
- return false;
116
TCGTemp *ts;
86
-}
117
TempOptInfo *ti;
87
-
118
+ int rep;
88
-#endif
119
89
diff --git a/linux-user/include/host/sparc64/host-signal.h b/linux-user/include/host/sparc64/host-signal.h
120
/* Only single-output opcodes are supported here. */
90
index XXXXXXX..XXXXXXX 100644
121
tcg_debug_assert(def->nb_oargs == 1);
91
--- a/linux-user/include/host/sparc64/host-signal.h
122
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
92
+++ b/linux-user/include/host/sparc64/host-signal.h
123
*/
93
@@ -1 +1,63 @@
124
if (ctx->type == TCG_TYPE_I32) {
94
-#include "../sparc/host-signal.h"
125
z_mask = (int32_t)z_mask;
95
+/*
126
- s_mask |= MAKE_64BIT_MASK(32, 32);
96
+ * host-signal.h: signal info dependent on the host architecture
127
+ s_mask |= INT32_MIN;
97
+ *
128
}
98
+ * Copyright (c) 2003-2005 Fabrice Bellard
129
99
+ * Copyright (c) 2021 Linaro Limited
130
if (z_mask == 0) {
100
+ *
131
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
101
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
132
102
+ * See the COPYING file in the top-level directory.
133
ti = ts_info(ts);
103
+ */
134
ti->z_mask = z_mask;
135
- ti->s_mask = s_mask | smask_from_zmask(z_mask);
104
+
136
+
105
+#ifndef SPARC64_HOST_SIGNAL_H
137
+ /* Canonicalize s_mask and incorporate data from z_mask. */
106
+#define SPARC64_HOST_SIGNAL_H
138
+ rep = clz64(~s_mask);
139
+ rep = MAX(rep, clz64(z_mask));
140
+ rep = MAX(rep - 1, 0);
141
+ ti->s_mask = INT64_MIN >> rep;
107
+
142
+
108
+/* FIXME: the third argument to a SA_SIGINFO handler is *not* ucontext_t. */
143
return true;
109
+typedef ucontext_t host_sigcontext;
144
}
110
+
145
111
+static inline uintptr_t host_signal_pc(host_sigcontext *uc)
146
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
112
+{
147
113
+ return uc->uc_mcontext.mc_gregs[MC_PC];
148
ctx->z_mask = z_mask;
114
+}
149
ctx->s_mask = s_mask;
115
+
150
- if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
116
+static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
151
+ if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
117
+{
152
return true;
118
+ uc->uc_mcontext.mc_gregs[MC_PC] = pc;
153
}
119
+}
154
120
+
155
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
121
+static inline void *host_signal_mask(host_sigcontext *uc)
156
s_mask |= MAKE_64BIT_MASK(len, 64 - len);
122
+{
157
ctx->s_mask = s_mask;
123
+ return &uc->uc_sigmask;
158
124
+}
159
- if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
125
+
160
+ if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
126
+static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
161
return true;
127
+{
162
}
128
+ uint32_t insn = *(uint32_t *)host_signal_pc(uc);
163
129
+
164
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
130
+ if ((insn >> 30) == 3) {
165
ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
131
+ switch ((insn >> 19) & 0x3f) {
166
132
+ case 0x05: /* stb */
167
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
133
+ case 0x15: /* stba */
168
- ctx->s_mask = smask_from_smask(s_mask);
134
+ case 0x06: /* sth */
169
135
+ case 0x16: /* stha */
170
return fold_masks(ctx, op);
136
+ case 0x04: /* st */
171
}
137
+ case 0x14: /* sta */
138
+ case 0x07: /* std */
139
+ case 0x17: /* stda */
140
+ case 0x0e: /* stx */
141
+ case 0x1e: /* stxa */
142
+ case 0x24: /* stf */
143
+ case 0x34: /* stfa */
144
+ case 0x27: /* stdf */
145
+ case 0x37: /* stdfa */
146
+ case 0x26: /* stqf */
147
+ case 0x36: /* stqfa */
148
+ case 0x25: /* stfsr */
149
+ case 0x3c: /* casa */
150
+ case 0x3e: /* casxa */
151
+ return true;
152
+ }
153
+ }
154
+ return false;
155
+}
156
+
157
+#endif
158
--
172
--
159
2.25.1
173
2.43.0
160
161
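[Editor's note] As a side note on the new s_mask form introduced above, the following standalone C sketch (an illustration, not QEMU code) contrasts the old "repetitions only" mask with the new "all bits equal to the sign bit" mask, and demonstrates the property the commit message relies on: the new-form mask of an arithmetically shifted value is just the arithmetically shifted mask. It assumes the GCC/Clang __builtin_clrsbll builtin and an arithmetic right shift for negative signed values.

/* Illustration only of the two sign-mask representations. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Old form: a left-aligned mask of clrsb(value) bits, sign bit excluded. */
static uint64_t smask_repetitions(uint64_t v)
{
    return ~(~0ull >> __builtin_clrsbll((int64_t)v));
}

/* New form: every bit that is a copy of the sign bit, sign bit included. */
static uint64_t smask_all_sign_bits(uint64_t v)
{
    return (uint64_t)(INT64_MIN >> __builtin_clrsbll((int64_t)v));
}

int main(void)
{
    uint64_t v = 0xffffffffffff1234ull;   /* top 48 bits all equal the msb */
    int sh = 4;

    printf("old  %016" PRIx64 "\n", smask_repetitions(v));
    printf("new  %016" PRIx64 "\n", smask_all_sign_bits(v));

    /*
     * Shifting the value arithmetically and shifting the new-form mask by
     * the same amount agrees with recomputing the mask from the shifted
     * value, which is what makes shift folding straightforward.
     */
    uint64_t v_sar = (uint64_t)((int64_t)v >> sh);
    uint64_t m_sar = (uint64_t)((int64_t)smask_all_sign_bits(v) >> sh);
    printf("shifted mask %016" PRIx64 " recomputed %016" PRIx64 "\n",
           m_sar, smask_all_sign_bits(v_sar));
    return 0;
}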
1
Do not directly access the uc_sigmask member.
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
This is preparation for a sparc64 fix.
3
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
3
---
8
linux-user/include/host/aarch64/host-signal.h | 5 +++++
4
tcg/optimize.c | 9 +++++----
9
linux-user/include/host/alpha/host-signal.h | 5 +++++
5
1 file changed, 5 insertions(+), 4 deletions(-)
10
linux-user/include/host/arm/host-signal.h | 5 +++++
11
linux-user/include/host/i386/host-signal.h | 5 +++++
12
.../include/host/loongarch64/host-signal.h | 5 +++++
13
linux-user/include/host/mips/host-signal.h | 5 +++++
14
linux-user/include/host/ppc/host-signal.h | 5 +++++
15
linux-user/include/host/riscv/host-signal.h | 5 +++++
16
linux-user/include/host/s390/host-signal.h | 5 +++++
17
linux-user/include/host/sparc/host-signal.h | 5 +++++
18
linux-user/include/host/x86_64/host-signal.h | 5 +++++
19
linux-user/signal.c | 18 ++++++++----------
20
12 files changed, 63 insertions(+), 10 deletions(-)
21
6
22
diff --git a/linux-user/include/host/aarch64/host-signal.h b/linux-user/include/host/aarch64/host-signal.h
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
23
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
24
--- a/linux-user/include/host/aarch64/host-signal.h
9
--- a/tcg/optimize.c
25
+++ b/linux-user/include/host/aarch64/host-signal.h
10
+++ b/tcg/optimize.c
26
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
11
@@ -XXX,XX +XXX,XX @@ static void finish_ebb(OptContext *ctx)
27
uc->uc_mcontext.pc = pc;
12
remove_mem_copy_all(ctx);
28
}
13
}
29
14
30
+static inline void *host_signal_mask(ucontext_t *uc)
15
-static void finish_folding(OptContext *ctx, TCGOp *op)
31
+{
16
+static bool finish_folding(OptContext *ctx, TCGOp *op)
32
+ return &uc->uc_sigmask;
33
+}
34
+
35
static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
36
{
17
{
37
struct _aarch64_ctx *hdr;
18
const TCGOpDef *def = &tcg_op_defs[op->opc];
38
diff --git a/linux-user/include/host/alpha/host-signal.h b/linux-user/include/host/alpha/host-signal.h
19
int i, nb_oargs;
39
index XXXXXXX..XXXXXXX 100644
20
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
40
--- a/linux-user/include/host/alpha/host-signal.h
21
ts_info(ts)->z_mask = ctx->z_mask;
41
+++ b/linux-user/include/host/alpha/host-signal.h
22
}
42
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
23
}
43
uc->uc_mcontext.sc_pc = pc;
24
+ return true;
44
}
25
}
45
26
46
+static inline void *host_signal_mask(ucontext_t *uc)
27
/*
47
+{
28
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
48
+ return &uc->uc_sigmask;
29
fold_xi_to_x(ctx, op, 0)) {
49
+}
30
return true;
50
+
31
}
51
static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
32
- return false;
52
{
33
+ return finish_folding(ctx, op);
53
uint32_t *pc = (uint32_t *)host_signal_pc(uc);
54
diff --git a/linux-user/include/host/arm/host-signal.h b/linux-user/include/host/arm/host-signal.h
55
index XXXXXXX..XXXXXXX 100644
56
--- a/linux-user/include/host/arm/host-signal.h
57
+++ b/linux-user/include/host/arm/host-signal.h
58
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
59
uc->uc_mcontext.arm_pc = pc;
60
}
34
}
61
35
62
+static inline void *host_signal_mask(ucontext_t *uc)
36
/* We cannot as yet do_constant_folding with vectors. */
63
+{
37
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
64
+ return &uc->uc_sigmask;
38
fold_xi_to_x(ctx, op, 0)) {
65
+}
39
return true;
66
+
40
}
67
static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
41
- return false;
68
{
42
+ return finish_folding(ctx, op);
69
/*
70
diff --git a/linux-user/include/host/i386/host-signal.h b/linux-user/include/host/i386/host-signal.h
71
index XXXXXXX..XXXXXXX 100644
72
--- a/linux-user/include/host/i386/host-signal.h
73
+++ b/linux-user/include/host/i386/host-signal.h
74
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
75
uc->uc_mcontext.gregs[REG_EIP] = pc;
76
}
43
}
77
44
78
+static inline void *host_signal_mask(ucontext_t *uc)
45
static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
79
+{
46
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
80
+ return &uc->uc_sigmask;
47
op->args[4] = arg_new_constant(ctx, bl);
81
+}
48
op->args[5] = arg_new_constant(ctx, bh);
82
+
49
}
83
static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
50
- return false;
84
{
51
+ return finish_folding(ctx, op);
85
return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe
86
diff --git a/linux-user/include/host/loongarch64/host-signal.h b/linux-user/include/host/loongarch64/host-signal.h
87
index XXXXXXX..XXXXXXX 100644
88
--- a/linux-user/include/host/loongarch64/host-signal.h
89
+++ b/linux-user/include/host/loongarch64/host-signal.h
90
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
91
uc->uc_mcontext.__pc = pc;
92
}
52
}
93
53
94
+static inline void *host_signal_mask(ucontext_t *uc)
54
static bool fold_add2(OptContext *ctx, TCGOp *op)
95
+{
96
+ return &uc->uc_sigmask;
97
+}
98
+
99
static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
100
{
101
const uint32_t *pinsn = (const uint32_t *)host_signal_pc(uc);
102
diff --git a/linux-user/include/host/mips/host-signal.h b/linux-user/include/host/mips/host-signal.h
103
index XXXXXXX..XXXXXXX 100644
104
--- a/linux-user/include/host/mips/host-signal.h
105
+++ b/linux-user/include/host/mips/host-signal.h
106
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
107
uc->uc_mcontext.pc = pc;
108
}
109
110
+static inline void *host_signal_mask(ucontext_t *uc)
111
+{
112
+ return &uc->uc_sigmask;
113
+}
114
+
115
#if defined(__misp16) || defined(__mips_micromips)
116
#error "Unsupported encoding"
117
#endif
118
diff --git a/linux-user/include/host/ppc/host-signal.h b/linux-user/include/host/ppc/host-signal.h
119
index XXXXXXX..XXXXXXX 100644
120
--- a/linux-user/include/host/ppc/host-signal.h
121
+++ b/linux-user/include/host/ppc/host-signal.h
122
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
123
uc->uc_mcontext.regs->nip = pc;
124
}
125
126
+static inline void *host_signal_mask(ucontext_t *uc)
127
+{
128
+ return &uc->uc_sigmask;
129
+}
130
+
131
static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
132
{
133
return uc->uc_mcontext.regs->trap != 0x400
134
diff --git a/linux-user/include/host/riscv/host-signal.h b/linux-user/include/host/riscv/host-signal.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/linux-user/include/host/riscv/host-signal.h
137
+++ b/linux-user/include/host/riscv/host-signal.h
138
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
139
uc->uc_mcontext.__gregs[REG_PC] = pc;
140
}
141
142
+static inline void *host_signal_mask(ucontext_t *uc)
143
+{
144
+ return &uc->uc_sigmask;
145
+}
146
+
147
static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
148
{
149
/*
150
diff --git a/linux-user/include/host/s390/host-signal.h b/linux-user/include/host/s390/host-signal.h
151
index XXXXXXX..XXXXXXX 100644
152
--- a/linux-user/include/host/s390/host-signal.h
153
+++ b/linux-user/include/host/s390/host-signal.h
154
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
155
uc->uc_mcontext.psw.addr = pc;
156
}
157
158
+static inline void *host_signal_mask(ucontext_t *uc)
159
+{
160
+ return &uc->uc_sigmask;
161
+}
162
+
163
static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
164
{
165
uint16_t *pinsn = (uint16_t *)host_signal_pc(uc);
166
diff --git a/linux-user/include/host/sparc/host-signal.h b/linux-user/include/host/sparc/host-signal.h
167
index XXXXXXX..XXXXXXX 100644
168
--- a/linux-user/include/host/sparc/host-signal.h
169
+++ b/linux-user/include/host/sparc/host-signal.h
170
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
171
#endif
172
}
173
174
+static inline void *host_signal_mask(ucontext_t *uc)
175
+{
176
+ return &uc->uc_sigmask;
177
+}
178
+
179
static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
180
{
181
uint32_t insn = *(uint32_t *)host_signal_pc(uc);
182
diff --git a/linux-user/include/host/x86_64/host-signal.h b/linux-user/include/host/x86_64/host-signal.h
183
index XXXXXXX..XXXXXXX 100644
184
--- a/linux-user/include/host/x86_64/host-signal.h
185
+++ b/linux-user/include/host/x86_64/host-signal.h
186
@@ -XXX,XX +XXX,XX @@ static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
187
uc->uc_mcontext.gregs[REG_RIP] = pc;
188
}
189
190
+static inline void *host_signal_mask(ucontext_t *uc)
191
+{
192
+ return &uc->uc_sigmask;
193
+}
194
+
195
static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
196
{
197
return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe
198
diff --git a/linux-user/signal.c b/linux-user/signal.c
199
index XXXXXXX..XXXXXXX 100644
200
--- a/linux-user/signal.c
201
+++ b/linux-user/signal.c
202
@@ -XXX,XX +XXX,XX @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
203
int guest_sig;
204
uintptr_t pc = 0;
205
bool sync_sig = false;
206
+ void *sigmask = host_signal_mask(uc);
207
208
/*
209
* Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
210
@@ -XXX,XX +XXX,XX @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
211
if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) {
212
/* If this was a write to a TB protected page, restart. */
213
if (is_write &&
214
- handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask,
215
- pc, guest_addr)) {
216
+ handle_sigsegv_accerr_write(cpu, sigmask, pc, guest_addr)) {
217
return;
218
}
219
220
@@ -XXX,XX +XXX,XX @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
221
}
222
}
223
224
- sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
225
+ sigprocmask(SIG_SETMASK, sigmask, NULL);
226
cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
227
} else {
228
- sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
229
+ sigprocmask(SIG_SETMASK, sigmask, NULL);
230
if (info->si_code == BUS_ADRALN) {
231
cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
232
}
233
@@ -XXX,XX +XXX,XX @@ static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
234
* now and it getting out to the main loop. Signals will be
235
* unblocked again in process_pending_signals().
236
*
237
- * WARNING: we cannot use sigfillset() here because the uc_sigmask
238
+ * WARNING: we cannot use sigfillset() here because the sigmask
239
* field is a kernel sigset_t, which is much smaller than the
240
* libc sigset_t which sigfillset() operates on. Using sigfillset()
241
* would write 0xff bytes off the end of the structure and trash
242
* data on the struct.
243
- * We can't use sizeof(uc->uc_sigmask) either, because the libc
244
- * headers define the struct field with the wrong (too large) type.
245
*/
246
- memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
247
- sigdelset(&uc->uc_sigmask, SIGSEGV);
248
- sigdelset(&uc->uc_sigmask, SIGBUS);
249
+ memset(sigmask, 0xff, SIGSET_T_SIZE);
250
+ sigdelset(sigmask, SIGSEGV);
251
+ sigdelset(sigmask, SIGBUS);
252
253
/* interrupt the virtual CPU as soon as possible */
254
cpu_exit(thread_cpu);
255
--
55
--
256
2.25.1
56
2.43.0
257
258
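[Editor's note] The finish_folding conversion above sets the shape for the rest of the optimize series: every fold_*() helper is reworked so that each return path goes through exactly one finalizer, which performs the per-output mask bookkeeping and reports the op as handled. A rough standalone sketch of that control-flow convention follows; the struct and names are invented for the illustration and the return-value meaning is simplified.

/* Sketch of the "every path returns through a finalizer" convention. */
#include <stdbool.h>
#include <stdint.h>

typedef struct {
    uint64_t z_mask;   /* bit is 0 iff the value bit is known to be 0 */
    uint64_t s_mask;   /* bit is 1 if the value bit matches the msb   */
} ValueInfo;

typedef struct {
    ValueInfo out;
    uint64_t in1_z, in2_z;
} FoldCtx;

/* Finalizer: record what we know about the output, report "handled". */
static bool finish_masks(FoldCtx *ctx, uint64_t z_mask, uint64_t s_mask)
{
    ctx->out.z_mask = z_mask;
    ctx->out.s_mask = s_mask;
    return true;
}

static bool fold_and_like(FoldCtx *ctx)
{
    /* ... constant-folding special cases would "return true" here ... */

    /* An AND result can only have a 1 where both inputs may have a 1. */
    return finish_masks(ctx, ctx->in1_z & ctx->in2_z, 0 /* no sign info */);
}

int main(void)
{
    FoldCtx ctx = { .in1_z = 0x00ff, .in2_z = 0x0ff0 };
    return fold_and_like(&ctx) && ctx.out.z_mask == 0x00f0 ? 0 : 1;
}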
1
This is more or less the opposite of the other tcg hosts, where
1
Introduce ti_is_const, ti_const_val, ti_is_const_val.
2
we get (normal) alignment checks for free with host SIGBUS and
3
need to add code to support unaligned accesses.
4
2
5
Fortunately, the ISA contains pairs of instructions that are
6
used to implement unaligned memory accesses. Use them.
7
8
Tested-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
9
Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
10
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
4
---
13
tcg/mips/tcg-target.h | 2 -
5
tcg/optimize.c | 20 +++++++++++++++++---
14
tcg/mips/tcg-target.c.inc | 334 +++++++++++++++++++++++++++++++++++++-
6
1 file changed, 17 insertions(+), 3 deletions(-)
15
2 files changed, 328 insertions(+), 8 deletions(-)
16
7
17
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
18
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
19
--- a/tcg/mips/tcg-target.h
10
--- a/tcg/optimize.c
20
+++ b/tcg/mips/tcg-target.h
11
+++ b/tcg/optimize.c
21
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
12
@@ -XXX,XX +XXX,XX @@ static inline TempOptInfo *arg_info(TCGArg arg)
22
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t)
13
return ts_info(arg_temp(arg));
23
QEMU_ERROR("code path is reachable");
24
25
-#ifdef CONFIG_SOFTMMU
26
#define TCG_TARGET_NEED_LDST_LABELS
27
-#endif
28
29
#endif
30
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
31
index XXXXXXX..XXXXXXX 100644
32
--- a/tcg/mips/tcg-target.c.inc
33
+++ b/tcg/mips/tcg-target.c.inc
34
@@ -XXX,XX +XXX,XX @@
35
* THE SOFTWARE.
36
*/
37
38
+#include "../tcg-ldst.c.inc"
39
+
40
#ifdef HOST_WORDS_BIGENDIAN
41
# define MIPS_BE 1
42
#else
43
@@ -XXX,XX +XXX,XX @@ typedef enum {
44
OPC_ORI = 015 << 26,
45
OPC_XORI = 016 << 26,
46
OPC_LUI = 017 << 26,
47
+ OPC_BNEL = 025 << 26,
48
+ OPC_BNEZALC_R6 = 030 << 26,
49
OPC_DADDIU = 031 << 26,
50
+ OPC_LDL = 032 << 26,
51
+ OPC_LDR = 033 << 26,
52
OPC_LB = 040 << 26,
53
OPC_LH = 041 << 26,
54
+ OPC_LWL = 042 << 26,
55
OPC_LW = 043 << 26,
56
OPC_LBU = 044 << 26,
57
OPC_LHU = 045 << 26,
58
+ OPC_LWR = 046 << 26,
59
OPC_LWU = 047 << 26,
60
OPC_SB = 050 << 26,
61
OPC_SH = 051 << 26,
62
+ OPC_SWL = 052 << 26,
63
OPC_SW = 053 << 26,
64
+ OPC_SDL = 054 << 26,
65
+ OPC_SDR = 055 << 26,
66
+ OPC_SWR = 056 << 26,
67
OPC_LD = 067 << 26,
68
OPC_SD = 077 << 26,
69
70
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
71
}
14
}
72
15
73
#if defined(CONFIG_SOFTMMU)
16
+static inline bool ti_is_const(TempOptInfo *ti)
74
-#include "../tcg-ldst.c.inc"
75
-
76
static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = {
77
[MO_UB] = helper_ret_ldub_mmu,
78
[MO_SB] = helper_ret_ldsb_mmu,
79
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
80
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
81
return true;
82
}
83
-#endif
84
+
85
+#else
86
+
87
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
88
+ TCGReg addrhi, unsigned a_bits)
89
+{
17
+{
90
+ unsigned a_mask = (1 << a_bits) - 1;
18
+ return ti->is_const;
91
+ TCGLabelQemuLdst *l = new_ldst_label(s);
92
+
93
+ l->is_ld = is_ld;
94
+ l->addrlo_reg = addrlo;
95
+ l->addrhi_reg = addrhi;
96
+
97
+ /* We are expecting a_bits to max out at 7, much lower than ANDI. */
98
+ tcg_debug_assert(a_bits < 16);
99
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask);
100
+
101
+ l->label_ptr[0] = s->code_ptr;
102
+ if (use_mips32r6_instructions) {
103
+ tcg_out_opc_br(s, OPC_BNEZALC_R6, TCG_REG_ZERO, TCG_TMP0);
104
+ } else {
105
+ tcg_out_opc_br(s, OPC_BNEL, TCG_TMP0, TCG_REG_ZERO);
106
+ tcg_out_nop(s);
107
+ }
108
+
109
+ l->raddr = tcg_splitwx_to_rx(s->code_ptr);
110
+}
19
+}
111
+
20
+
112
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
21
+static inline uint64_t ti_const_val(TempOptInfo *ti)
113
+{
22
+{
114
+ void *target;
23
+ return ti->val;
115
+
116
+ if (!reloc_pc16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
117
+ return false;
118
+ }
119
+
120
+ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
121
+ /* A0 is env, A1 is skipped, A2:A3 is the uint64_t address. */
122
+ TCGReg a2 = MIPS_BE ? l->addrhi_reg : l->addrlo_reg;
123
+ TCGReg a3 = MIPS_BE ? l->addrlo_reg : l->addrhi_reg;
124
+
125
+ if (a3 != TCG_REG_A2) {
126
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2);
127
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3);
128
+ } else if (a2 != TCG_REG_A3) {
129
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3);
130
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2);
131
+ } else {
132
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A2);
133
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, TCG_REG_A3);
134
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, TCG_TMP0);
135
+ }
136
+ } else {
137
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
138
+ }
139
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
140
+
141
+ /*
142
+ * Tail call to the helper, with the return address back inline.
143
+ * We have arrived here via BNEL, so $31 is already set.
144
+ */
145
+ target = (l->is_ld ? helper_unaligned_ld : helper_unaligned_st);
146
+ tcg_out_call_int(s, target, true);
147
+ return true;
148
+}
24
+}
149
+
25
+
150
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
26
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
151
+{
27
+{
152
+ return tcg_out_fail_alignment(s, l);
28
+ return ti_is_const(ti) && ti_const_val(ti) == val;
153
+}
29
+}
154
+
30
+
155
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
31
static inline bool ts_is_const(TCGTemp *ts)
156
+{
32
{
157
+ return tcg_out_fail_alignment(s, l);
33
- return ts_info(ts)->is_const;
158
+}
34
+ return ti_is_const(ts_info(ts));
159
+#endif /* SOFTMMU */
160
161
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
162
TCGReg base, MemOp opc, bool is_64)
163
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
164
}
165
}
35
}
166
36
167
+static void __attribute__((unused))
37
static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
168
+tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
169
+ TCGReg base, MemOp opc, bool is_64)
170
+{
171
+ const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR;
172
+ const MIPSInsn lw2 = MIPS_BE ? OPC_LWR : OPC_LWL;
173
+ const MIPSInsn ld1 = MIPS_BE ? OPC_LDL : OPC_LDR;
174
+ const MIPSInsn ld2 = MIPS_BE ? OPC_LDR : OPC_LDL;
175
+
176
+ bool sgn = (opc & MO_SIGN);
177
+
178
+ switch (opc & (MO_SSIZE | MO_BSWAP)) {
179
+ case MO_SW | MO_BE:
180
+ case MO_UW | MO_BE:
181
+ tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP0, base, 0);
182
+ tcg_out_opc_imm(s, OPC_LBU, lo, base, 1);
183
+ if (use_mips32r2_instructions) {
184
+ tcg_out_opc_bf(s, OPC_INS, lo, TCG_TMP0, 31, 8);
185
+ } else {
186
+ tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8);
187
+ tcg_out_opc_reg(s, OPC_OR, lo, TCG_TMP0, TCG_TMP1);
188
+ }
189
+ break;
190
+
191
+ case MO_SW | MO_LE:
192
+ case MO_UW | MO_LE:
193
+ if (use_mips32r2_instructions && lo != base) {
194
+ tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
195
+ tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP0, base, 1);
196
+ tcg_out_opc_bf(s, OPC_INS, lo, TCG_TMP0, 31, 8);
197
+ } else {
198
+ tcg_out_opc_imm(s, OPC_LBU, TCG_TMP0, base, 0);
199
+ tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP1, base, 1);
200
+ tcg_out_opc_sa(s, OPC_SLL, TCG_TMP1, TCG_TMP1, 8);
201
+ tcg_out_opc_reg(s, OPC_OR, lo, TCG_TMP0, TCG_TMP1);
202
+ }
203
+ break;
204
+
205
+ case MO_SL:
206
+ case MO_UL:
207
+ tcg_out_opc_imm(s, lw1, lo, base, 0);
208
+ tcg_out_opc_imm(s, lw2, lo, base, 3);
209
+ if (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn) {
210
+ tcg_out_ext32u(s, lo, lo);
211
+ }
212
+ break;
213
+
214
+ case MO_UL | MO_BSWAP:
215
+ case MO_SL | MO_BSWAP:
216
+ if (use_mips32r2_instructions) {
217
+ tcg_out_opc_imm(s, lw1, lo, base, 0);
218
+ tcg_out_opc_imm(s, lw2, lo, base, 3);
219
+ tcg_out_bswap32(s, lo, lo,
220
+ TCG_TARGET_REG_BITS == 64 && is_64
221
+ ? (sgn ? TCG_BSWAP_OS : TCG_BSWAP_OZ) : 0);
222
+ } else {
223
+ const tcg_insn_unit *subr =
224
+ (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn
225
+ ? bswap32u_addr : bswap32_addr);
226
+
227
+ tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0);
228
+ tcg_out_bswap_subr(s, subr);
229
+ /* delay slot */
230
+ tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 3);
231
+ tcg_out_mov(s, is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, lo, TCG_TMP3);
232
+ }
233
+ break;
234
+
235
+ case MO_UQ:
236
+ if (TCG_TARGET_REG_BITS == 64) {
237
+ tcg_out_opc_imm(s, ld1, lo, base, 0);
238
+ tcg_out_opc_imm(s, ld2, lo, base, 7);
239
+ } else {
240
+ tcg_out_opc_imm(s, lw1, MIPS_BE ? hi : lo, base, 0 + 0);
241
+ tcg_out_opc_imm(s, lw2, MIPS_BE ? hi : lo, base, 0 + 3);
242
+ tcg_out_opc_imm(s, lw1, MIPS_BE ? lo : hi, base, 4 + 0);
243
+ tcg_out_opc_imm(s, lw2, MIPS_BE ? lo : hi, base, 4 + 3);
244
+ }
245
+ break;
246
+
247
+ case MO_UQ | MO_BSWAP:
248
+ if (TCG_TARGET_REG_BITS == 64) {
249
+ if (use_mips32r2_instructions) {
250
+ tcg_out_opc_imm(s, ld1, lo, base, 0);
251
+ tcg_out_opc_imm(s, ld2, lo, base, 7);
252
+ tcg_out_bswap64(s, lo, lo);
253
+ } else {
254
+ tcg_out_opc_imm(s, ld1, TCG_TMP0, base, 0);
255
+ tcg_out_bswap_subr(s, bswap64_addr);
256
+ /* delay slot */
257
+ tcg_out_opc_imm(s, ld2, TCG_TMP0, base, 7);
258
+ tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3);
259
+ }
260
+ } else if (use_mips32r2_instructions) {
261
+ tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0 + 0);
262
+ tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 0 + 3);
263
+ tcg_out_opc_imm(s, lw1, TCG_TMP1, base, 4 + 0);
264
+ tcg_out_opc_imm(s, lw2, TCG_TMP1, base, 4 + 3);
265
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, TCG_TMP0);
266
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, TCG_TMP1);
267
+ tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? lo : hi, TCG_TMP0, 16);
268
+ tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? hi : lo, TCG_TMP1, 16);
269
+ } else {
270
+ tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0 + 0);
271
+ tcg_out_bswap_subr(s, bswap32_addr);
272
+ /* delay slot */
273
+ tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 0 + 3);
274
+ tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 4 + 0);
275
+ tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? lo : hi, TCG_TMP3);
276
+ tcg_out_bswap_subr(s, bswap32_addr);
277
+ /* delay slot */
278
+ tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 4 + 3);
279
+ tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3);
280
+ }
281
+ break;
282
+
283
+ default:
284
+ g_assert_not_reached();
285
+ }
286
+}
287
+
288
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
289
{
38
{
290
TCGReg addr_regl, addr_regh __attribute__((unused));
39
- TempOptInfo *ti = ts_info(ts);
291
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
40
- return ti->is_const && ti->val == val;
292
MemOp opc;
41
+ return ti_is_const_val(ts_info(ts), val);
293
#if defined(CONFIG_SOFTMMU)
294
tcg_insn_unit *label_ptr[2];
295
+#else
296
+ unsigned a_bits, s_bits;
297
#endif
298
TCGReg base = TCG_REG_A0;
299
300
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
301
} else {
302
tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
303
}
304
- tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
305
+ a_bits = get_alignment_bits(opc);
306
+ s_bits = opc & MO_SIZE;
307
+ /*
308
+ * R6 removes the left/right instructions but requires the
309
+ * system to support misaligned memory accesses.
310
+ */
311
+ if (use_mips32r6_instructions) {
312
+ if (a_bits) {
313
+ tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
314
+ }
315
+ tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
316
+ } else {
317
+ if (a_bits && a_bits != s_bits) {
318
+ tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
319
+ }
320
+ if (a_bits >= s_bits) {
321
+ tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
322
+ } else {
323
+ tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64);
324
+ }
325
+ }
326
#endif
327
}
42
}
328
43
329
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
44
static inline bool arg_is_const(TCGArg arg)
330
}
331
}
332
333
+static void __attribute__((unused))
334
+tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
335
+ TCGReg base, MemOp opc)
336
+{
337
+ const MIPSInsn sw1 = MIPS_BE ? OPC_SWL : OPC_SWR;
338
+ const MIPSInsn sw2 = MIPS_BE ? OPC_SWR : OPC_SWL;
339
+ const MIPSInsn sd1 = MIPS_BE ? OPC_SDL : OPC_SDR;
340
+ const MIPSInsn sd2 = MIPS_BE ? OPC_SDR : OPC_SDL;
341
+
342
+ /* Don't clutter the code below with checks to avoid bswapping ZERO. */
343
+ if ((lo | hi) == 0) {
344
+ opc &= ~MO_BSWAP;
345
+ }
346
+
347
+ switch (opc & (MO_SIZE | MO_BSWAP)) {
348
+ case MO_16 | MO_BE:
349
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, lo, 8);
350
+ tcg_out_opc_imm(s, OPC_SB, TCG_TMP0, base, 0);
351
+ tcg_out_opc_imm(s, OPC_SB, lo, base, 1);
352
+ break;
353
+
354
+ case MO_16 | MO_LE:
355
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, lo, 8);
356
+ tcg_out_opc_imm(s, OPC_SB, lo, base, 0);
357
+ tcg_out_opc_imm(s, OPC_SB, TCG_TMP0, base, 1);
358
+ break;
359
+
360
+ case MO_32 | MO_BSWAP:
361
+ tcg_out_bswap32(s, TCG_TMP3, lo, 0);
362
+ lo = TCG_TMP3;
363
+ /* fall through */
364
+ case MO_32:
365
+ tcg_out_opc_imm(s, sw1, lo, base, 0);
366
+ tcg_out_opc_imm(s, sw2, lo, base, 3);
367
+ break;
368
+
369
+ case MO_64 | MO_BSWAP:
370
+ if (TCG_TARGET_REG_BITS == 64) {
371
+ tcg_out_bswap64(s, TCG_TMP3, lo);
372
+ lo = TCG_TMP3;
373
+ } else if (use_mips32r2_instructions) {
374
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, MIPS_BE ? hi : lo);
375
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, MIPS_BE ? lo : hi);
376
+ tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP0, TCG_TMP0, 16);
377
+ tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP1, TCG_TMP1, 16);
378
+ hi = MIPS_BE ? TCG_TMP0 : TCG_TMP1;
379
+ lo = MIPS_BE ? TCG_TMP1 : TCG_TMP0;
380
+ } else {
381
+ tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? lo : hi, 0);
382
+ tcg_out_opc_imm(s, sw1, TCG_TMP3, base, 0 + 0);
383
+ tcg_out_opc_imm(s, sw2, TCG_TMP3, base, 0 + 3);
384
+ tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? hi : lo, 0);
385
+ tcg_out_opc_imm(s, sw1, TCG_TMP3, base, 4 + 0);
386
+ tcg_out_opc_imm(s, sw2, TCG_TMP3, base, 4 + 3);
387
+ break;
388
+ }
389
+ /* fall through */
390
+ case MO_64:
391
+ if (TCG_TARGET_REG_BITS == 64) {
392
+ tcg_out_opc_imm(s, sd1, lo, base, 0);
393
+ tcg_out_opc_imm(s, sd2, lo, base, 7);
394
+ } else {
395
+ tcg_out_opc_imm(s, sw1, MIPS_BE ? hi : lo, base, 0 + 0);
396
+ tcg_out_opc_imm(s, sw2, MIPS_BE ? hi : lo, base, 0 + 3);
397
+ tcg_out_opc_imm(s, sw1, MIPS_BE ? lo : hi, base, 4 + 0);
398
+ tcg_out_opc_imm(s, sw2, MIPS_BE ? lo : hi, base, 4 + 3);
399
+ }
400
+ break;
401
+
402
+ default:
403
+ tcg_abort();
404
+ }
405
+}
406
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
407
{
408
TCGReg addr_regl, addr_regh __attribute__((unused));
409
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
410
MemOp opc;
411
#if defined(CONFIG_SOFTMMU)
412
tcg_insn_unit *label_ptr[2];
413
+#else
414
+ unsigned a_bits, s_bits;
415
#endif
416
TCGReg base = TCG_REG_A0;
417
418
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
419
data_regl, data_regh, addr_regl, addr_regh,
420
s->code_ptr, label_ptr);
421
#else
422
- base = TCG_REG_A0;
423
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
424
tcg_out_ext32u(s, base, addr_regl);
425
addr_regl = base;
426
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
427
} else {
428
tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
429
}
430
- tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
431
+ a_bits = get_alignment_bits(opc);
432
+ s_bits = opc & MO_SIZE;
433
+ /*
434
+ * R6 removes the left/right instructions but requires the
435
+ * system to support misaligned memory accesses.
436
+ */
437
+ if (use_mips32r6_instructions) {
438
+ if (a_bits) {
439
+ tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
440
+ }
441
+ tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
442
+ } else {
443
+ if (a_bits && a_bits != s_bits) {
444
+ tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
445
+ }
446
+ if (a_bits >= s_bits) {
447
+ tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
448
+ } else {
449
+ tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc);
450
+ }
451
+ }
452
#endif
453
}
454
455
--
45
--
456
2.25.1
46
2.43.0
457
458
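[Editor's note] Several patches in this series reason in terms of z_mask and s_mask, so a small self-contained check of the two invariants may help: a z_mask bit of 0 means the value bit is known to be 0, and an s_mask bit of 1 (in the new form) means the value bit is guaranteed to equal the most significant bit. The sketch below is not QEMU code; fits(), the 8-bit width, and the example masks are made up for the illustration. It exhaustively verifies the combination rule used by fold_and in the next patch: z = z1 & z2 and s = s1 & s2.

/* Exhaustive 8-bit soundness check of the AND mask-combination rules. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int fits(uint8_t v, uint8_t z, uint8_t s)
{
    uint8_t sign = (v & 0x80) ? 0xff : 0x00;
    /* No 1 bit outside z, and every s bit is a copy of the msb. */
    return (v & ~z) == 0 && ((v ^ sign) & s) == 0;
}

int main(void)
{
    uint8_t z1 = 0xff, s1 = 0xe0;   /* a: top three bits are sign copies */
    uint8_t z2 = 0xff, s2 = 0xc0;   /* b: top two bits are sign copies   */

    uint8_t zr = z1 & z2;           /* combination rule for AND */
    uint8_t sr = s1 & s2;

    for (unsigned a = 0; a < 256; a++) {
        for (unsigned b = 0; b < 256; b++) {
            if (fits(a, z1, s1) && fits(b, z2, s2)) {
                assert(fits(a & b, zr, sr));
            }
        }
    }
    printf("AND mask rules hold: z=%02x s=%02x\n", (unsigned)zr, (unsigned)sr);
    return 0;
}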
1
From armv6, the architecture supports unaligned accesses.
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
All we need to do is perform the correct alignment check
2
Sink mask computation below fold_affected_mask early exit.
3
in tcg_out_tlb_read.
4
3
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/arm/tcg-target.c.inc | 41 ++++++++++++++++++++--------------------
7
tcg/optimize.c | 30 ++++++++++++++++--------------
9
1 file changed, 21 insertions(+), 20 deletions(-)
8
1 file changed, 16 insertions(+), 14 deletions(-)
10
9
11
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/arm/tcg-target.c.inc
12
--- a/tcg/optimize.c
14
+++ b/tcg/arm/tcg-target.c.inc
13
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_add2(OptContext *ctx, TCGOp *op)
16
int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
15
17
: offsetof(CPUTLBEntry, addr_write));
16
static bool fold_and(OptContext *ctx, TCGOp *op)
18
int fast_off = TLB_MASK_TABLE_OFS(mem_index);
17
{
19
- unsigned s_bits = opc & MO_SIZE;
18
- uint64_t z1, z2;
20
- unsigned a_bits = get_alignment_bits(opc);
19
+ uint64_t z1, z2, z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2_commutative(ctx, op) ||
23
fold_xi_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
- z2 = arg_info(op->args[2])->z_mask;
30
- ctx->z_mask = z1 & z2;
21
-
31
-
22
- /*
32
- /*
23
- * We don't support inline unaligned acceses, but we can easily
33
- * Sign repetitions are perforce all identical, whether they are 1 or 0.
24
- * support overalignment checks.
34
- * Bitwise operations preserve the relative quantity of the repetitions.
25
- */
35
- */
26
- if (a_bits < s_bits) {
36
- ctx->s_mask = arg_info(op->args[1])->s_mask
27
- a_bits = s_bits;
37
- & arg_info(op->args[2])->s_mask;
28
- }
38
+ t1 = arg_info(op->args[1]);
29
+ unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
39
+ t2 = arg_info(op->args[2]);
30
+ unsigned a_mask = (1 << get_alignment_bits(opc)) - 1;
40
+ z1 = t1->z_mask;
31
+ TCGReg t_addr;
41
+ z2 = t2->z_mask;
32
33
/* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */
34
tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
35
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
36
42
37
/*
43
/*
38
* Check alignment, check comparators.
44
* Known-zeros does not imply known-ones. Therefore unless
39
- * Do this in no more than 3 insns. Use MOVW for v7, if possible,
45
* arg2 is constant, we can't infer affected bits from it.
40
+ * Do this in 2-4 insns. Use MOVW for v7, if possible,
41
* to reduce the number of sequential conditional instructions.
42
* Almost all guests have at least 4k pages, which means that we need
43
* to clear at least 9 bits even for an 8-byte memory, which means it
44
* isn't worth checking for an immediate operand for BIC.
45
+ *
46
+ * For unaligned accesses, test the page of the last unit of alignment.
47
+ * This leaves the least significant alignment bits unchanged, and of
48
+ * course must be zero.
49
*/
46
*/
50
+ t_addr = addrlo;
47
- if (arg_is_const(op->args[2]) &&
51
+ if (a_mask < s_mask) {
48
- fold_affected_mask(ctx, op, z1 & ~z2)) {
52
+ t_addr = TCG_REG_R0;
49
+ if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
53
+ tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
50
return true;
54
+ addrlo, s_mask - a_mask);
55
+ }
56
if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
57
- tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1));
58
-
59
- tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask);
60
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask));
61
tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
62
- addrlo, TCG_REG_TMP, 0);
63
+ t_addr, TCG_REG_TMP, 0);
64
tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
65
} else {
66
- if (a_bits) {
67
- tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo,
68
- (1 << a_bits) - 1);
69
+ if (a_mask) {
70
+ tcg_debug_assert(a_mask <= 0xff);
71
+ tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
72
}
73
- tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, addrlo,
74
+ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
75
SHIFT_IMM_LSR(TARGET_PAGE_BITS));
76
- tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP,
77
+ tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
78
0, TCG_REG_R2, TCG_REG_TMP,
79
SHIFT_IMM_LSL(TARGET_PAGE_BITS));
80
}
51
}
52
53
- return fold_masks(ctx, op);
54
+ z_mask = z1 & z2;
55
+
56
+ /*
57
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
58
+ * Bitwise operations preserve the relative quantity of the repetitions.
59
+ */
60
+ s_mask = t1->s_mask & t2->s_mask;
61
+
62
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
63
}
64
65
static bool fold_andc(OptContext *ctx, TCGOp *op)
81
--
66
--
82
2.25.1
67
2.43.0
83
84
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Avoid double inversion of the value of the second const operand.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 21 +++++++++++----------
8
1 file changed, 11 insertions(+), 10 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
15
16
static bool fold_andc(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1;
19
+ uint64_t z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2(ctx, op) ||
23
fold_xx_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ t2 = arg_info(op->args[2]);
31
+ z_mask = t1->z_mask;
32
33
/*
34
* Known-zeros does not imply known-ones. Therefore unless
35
* arg2 is constant, we can't infer anything from it.
36
*/
37
- if (arg_is_const(op->args[2])) {
38
- uint64_t z2 = ~arg_info(op->args[2])->z_mask;
39
- if (fold_affected_mask(ctx, op, z1 & ~z2)) {
40
+ if (ti_is_const(t2)) {
41
+ uint64_t v2 = ti_const_val(t2);
42
+ if (fold_affected_mask(ctx, op, z_mask & v2)) {
43
return true;
44
}
45
- z1 &= z2;
46
+ z_mask &= ~v2;
47
}
48
- ctx->z_mask = z1;
49
50
- ctx->s_mask = arg_info(op->args[1])->s_mask
51
- & arg_info(op->args[2])->s_mask;
52
- return fold_masks(ctx, op);
53
+ s_mask = t1->s_mask & t2->s_mask;
54
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
55
}
56
57
static bool fold_brcond(OptContext *ctx, TCGOp *op)
58
--
59
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Always set s_mask along the BSWAP_OS path, since the result is
3
being explicitly sign-extended.
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 21 ++++++++++-----------
9
1 file changed, 10 insertions(+), 11 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
16
static bool fold_bswap(OptContext *ctx, TCGOp *op)
17
{
18
uint64_t z_mask, s_mask, sign;
19
+ TempOptInfo *t1 = arg_info(op->args[1]);
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t = arg_info(op->args[1])->val;
23
-
24
- t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
25
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
26
+ if (ti_is_const(t1)) {
27
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
28
+ do_constant_folding(op->opc, ctx->type,
29
+ ti_const_val(t1),
30
+ op->args[2]));
31
}
32
33
- z_mask = arg_info(op->args[1])->z_mask;
34
-
35
+ z_mask = t1->z_mask;
36
switch (op->opc) {
37
case INDEX_op_bswap16_i32:
38
case INDEX_op_bswap16_i64:
39
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
40
/* If the sign bit may be 1, force all the bits above to 1. */
41
if (z_mask & sign) {
42
z_mask |= sign;
43
- s_mask = sign << 1;
44
}
45
+ /* The value and therefore s_mask is explicitly sign-extended. */
46
+ s_mask = sign;
47
break;
48
default:
49
/* The high bits are undefined: force all bits above the sign to 1. */
50
z_mask |= sign << 1;
51
break;
52
}
53
- ctx->z_mask = z_mask;
54
- ctx->s_mask = s_mask;
55
56
- return fold_masks(ctx, op);
57
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
58
}
59
60
static bool fold_call(OptContext *ctx, TCGOp *op)
61
--
62
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Compute s_mask from the union of the maximum count and the
3
op2 fallback for op1 being zero.
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 15 ++++++++++-----
9
1 file changed, 10 insertions(+), 5 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
16
17
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t z_mask;
20
+ uint64_t z_mask, s_mask;
21
+ TempOptInfo *t1 = arg_info(op->args[1]);
22
+ TempOptInfo *t2 = arg_info(op->args[2]);
23
24
- if (arg_is_const(op->args[1])) {
25
- uint64_t t = arg_info(op->args[1])->val;
26
+ if (ti_is_const(t1)) {
27
+ uint64_t t = ti_const_val(t1);
28
29
if (t != 0) {
30
t = do_constant_folding(op->opc, ctx->type, t, 0);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
32
default:
33
g_assert_not_reached();
34
}
35
- ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
36
- return false;
37
+ s_mask = ~z_mask;
38
+ z_mask |= t2->z_mask;
39
+ s_mask &= t2->s_mask;
40
+
41
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
42
}
43
44
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
diff view generated by jsdifflib
New patch
1
Add fold_masks_z as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
15
return true;
16
}
17
18
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, z_mask, 0);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
27
28
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask;
31
+
32
if (fold_const1(ctx, op)) {
33
return true;
34
}
35
36
switch (ctx->type) {
37
case TCG_TYPE_I32:
38
- ctx->z_mask = 32 | 31;
39
+ z_mask = 32 | 31;
40
break;
41
case TCG_TYPE_I64:
42
- ctx->z_mask = 64 | 63;
43
+ z_mask = 64 | 63;
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_z(ctx, op, z_mask);
50
}
51
52
static bool fold_deposit(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
When we fold to and, use fold_and.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 35 +++++++++++++++++------------------
8
1 file changed, 17 insertions(+), 18 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
15
16
static bool fold_deposit(OptContext *ctx, TCGOp *op)
17
{
18
+ TempOptInfo *t1 = arg_info(op->args[1]);
19
+ TempOptInfo *t2 = arg_info(op->args[2]);
20
+ int ofs = op->args[3];
21
+ int len = op->args[4];
22
TCGOpcode and_opc;
23
+ uint64_t z_mask;
24
25
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
26
- uint64_t t1 = arg_info(op->args[1])->val;
27
- uint64_t t2 = arg_info(op->args[2])->val;
28
-
29
- t1 = deposit64(t1, op->args[3], op->args[4], t2);
30
- return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
31
+ if (ti_is_const(t1) && ti_is_const(t2)) {
32
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
33
+ deposit64(ti_const_val(t1), ofs, len,
34
+ ti_const_val(t2)));
35
}
36
37
switch (ctx->type) {
38
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
39
}
40
41
/* Inserting a value into zero at offset 0. */
42
- if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
43
- uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
44
+ if (ti_is_const_val(t1, 0) && ofs == 0) {
45
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
46
47
op->opc = and_opc;
48
op->args[1] = op->args[2];
49
op->args[2] = arg_new_constant(ctx, mask);
50
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
51
- return false;
52
+ return fold_and(ctx, op);
53
}
54
55
/* Inserting zero into a value. */
56
- if (arg_is_const_val(op->args[2], 0)) {
57
- uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
58
+ if (ti_is_const_val(t2, 0)) {
59
+ uint64_t mask = deposit64(-1, ofs, len, 0);
60
61
op->opc = and_opc;
62
op->args[2] = arg_new_constant(ctx, mask);
63
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
64
- return false;
65
+ return fold_and(ctx, op);
66
}
67
68
- ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
69
- op->args[3], op->args[4],
70
- arg_info(op->args[2])->z_mask);
71
- return false;
72
+ z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
73
+ return fold_masks_z(ctx, op, z_mask);
74
}
75
76
static bool fold_divide(OptContext *ctx, TCGOp *op)
77
--
78
2.43.0
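[Editor's note] The deposit-to-AND rewrites in the fold_deposit patch above are easy to sanity-check in isolation. The sketch below is an illustration, not QEMU code: it carries its own local deposit64() so it stands alone, and it checks the two identities the patch relies on, namely that depositing into zero at offset 0 is an AND with a low mask and that depositing zero into a value is an AND with the complementary mask.

/* Quick check of the deposit-to-AND identities used by fold_deposit. */
#include <assert.h>
#include <stdint.h>

static uint64_t deposit64(uint64_t dst, int ofs, int len, uint64_t src)
{
    uint64_t mask = (len < 64 ? (UINT64_C(1) << len) - 1 : ~UINT64_C(0)) << ofs;
    return (dst & ~mask) | ((src << ofs) & mask);
}

int main(void)
{
    uint64_t v = 0x123456789abcdef0ull;

    /* Inserting a value into zero at offset 0: AND with a low mask. */
    assert(deposit64(0, 0, 16, v) == (v & 0xffffull));

    /* Inserting zero into a value: AND with the complementary mask. */
    assert(deposit64(v, 8, 16, 0) == (v & ~(0xffffull << 8)));
    return 0;
}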
1
We can use the routines just added for user-only to emit
1
The input which overlaps the sign bit of the output can
2
unaligned accesses in softmmu mode too.
2
have its input s_mask propagated to the output s_mask.
3
3
4
Tested-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
6
---
9
tcg/mips/tcg-target.c.inc | 91 ++++++++++++++++++++++-----------------
7
tcg/optimize.c | 14 ++++++++++++--
10
1 file changed, 51 insertions(+), 40 deletions(-)
8
1 file changed, 12 insertions(+), 2 deletions(-)
11
9
12
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/mips/tcg-target.c.inc
12
--- a/tcg/optimize.c
15
+++ b/tcg/mips/tcg-target.c.inc
13
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
17
tcg_insn_unit *label_ptr[2], bool is_load)
15
TempOptInfo *t2 = arg_info(op->args[2]);
18
{
16
int ofs = op->args[3];
19
MemOp opc = get_memop(oi);
17
int len = op->args[4];
20
- unsigned s_bits = opc & MO_SIZE;
18
+ int width;
21
unsigned a_bits = get_alignment_bits(opc);
19
TCGOpcode and_opc;
22
+ unsigned s_bits = opc & MO_SIZE;
20
- uint64_t z_mask;
23
+ unsigned a_mask = (1 << a_bits) - 1;
21
+ uint64_t z_mask, s_mask;
24
+ unsigned s_mask = (1 << s_bits) - 1;
22
25
int mem_index = get_mmuidx(oi);
23
if (ti_is_const(t1) && ti_is_const(t2)) {
26
int fast_off = TLB_MASK_TABLE_OFS(mem_index);
24
return tcg_opt_gen_movi(ctx, op, op->args[0],
27
int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
25
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
28
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
26
switch (ctx->type) {
29
int add_off = offsetof(CPUTLBEntry, addend);
27
case TCG_TYPE_I32:
30
int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
28
and_opc = INDEX_op_and_i32;
31
: offsetof(CPUTLBEntry, addr_write));
29
+ width = 32;
32
- target_ulong mask;
30
break;
33
+ target_ulong tlb_mask;
31
case TCG_TYPE_I64:
34
32
and_opc = INDEX_op_and_i64;
35
/* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
33
+ width = 64;
36
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off);
34
break;
37
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
35
default:
38
/* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */
36
g_assert_not_reached();
39
tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1);
37
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
40
38
return fold_and(ctx, op);
41
- /* We don't currently support unaligned accesses.
42
- We could do so with mips32r6. */
43
- if (a_bits < s_bits) {
44
- a_bits = s_bits;
45
- }
46
-
47
- /* Mask the page bits, keeping the alignment bits to compare against. */
48
- mask = (target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
49
-
50
/* Load the (low-half) tlb comparator. */
51
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
52
- tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF);
53
- tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, mask);
54
+ tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF);
55
} else {
56
tcg_out_ldst(s, (TARGET_LONG_BITS == 64 ? OPC_LD
57
: TCG_TARGET_REG_BITS == 64 ? OPC_LWU : OPC_LW),
58
TCG_TMP0, TCG_TMP3, cmp_off);
59
- tcg_out_movi(s, TCG_TYPE_TL, TCG_TMP1, mask);
60
- /* No second compare is required here;
61
- load the tlb addend for the fast path. */
62
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off);
63
}
39
}
64
40
65
/* Zero extend a 32-bit guest address for a 64-bit host. */
41
+ /* The s_mask from the top portion of the deposit is still valid. */
66
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
42
+ if (ofs + len == width) {
67
tcg_out_ext32u(s, base, addrl);
43
+ s_mask = t2->s_mask << ofs;
68
addrl = base;
69
}
70
- tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);
71
+
72
+ /*
73
+ * Mask the page bits, keeping the alignment bits to compare against.
74
+ * For unaligned accesses, compare against the end of the access to
75
+ * verify that it does not cross a page boundary.
76
+ */
77
+ tlb_mask = (target_ulong)TARGET_PAGE_MASK | a_mask;
78
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_TMP1, tlb_mask);
79
+ if (a_mask >= s_mask) {
80
+ tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, addrl);
81
+ } else {
44
+ } else {
82
+ tcg_out_opc_imm(s, ALIAS_PADDI, TCG_TMP2, addrl, s_mask - a_mask);
45
+ s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
83
+ tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2);
84
+ }
46
+ }
85
+
47
+
86
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
48
z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
87
+ /* Load the tlb addend for the fast path. */
49
- return fold_masks_z(ctx, op, z_mask);
88
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
89
+ }
90
91
label_ptr[0] = s->code_ptr;
92
tcg_out_opc_br(s, OPC_BNE, TCG_TMP1, TCG_TMP0);
93
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
94
/* Load and test the high half tlb comparator. */
95
if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
96
/* delay slot */
97
- tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
98
+ tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + HI_OFF);
99
100
/* Load the tlb addend for the fast path. */
101
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP2, TCG_TMP3, add_off);
102
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
103
}
104
}
51
}
105
52
106
-static void __attribute__((unused))
53
static bool fold_divide(OptContext *ctx, TCGOp *op)
107
-tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
108
+static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
109
TCGReg base, MemOp opc, bool is_64)
110
{
111
const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR;
112
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
113
#if defined(CONFIG_SOFTMMU)
114
tcg_insn_unit *label_ptr[2];
115
#else
116
- unsigned a_bits, s_bits;
117
#endif
118
+ unsigned a_bits, s_bits;
119
TCGReg base = TCG_REG_A0;
120
121
data_regl = *args++;
122
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
123
addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
124
oi = *args++;
125
opc = get_memop(oi);
126
+ a_bits = get_alignment_bits(opc);
127
+ s_bits = opc & MO_SIZE;
128
129
+ /*
130
+ * R6 removes the left/right instructions but requires the
131
+ * system to support misaligned memory accesses.
132
+ */
133
#if defined(CONFIG_SOFTMMU)
134
tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 1);
135
- tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
136
+ if (use_mips32r6_instructions || a_bits >= s_bits) {
137
+ tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
138
+ } else {
139
+ tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64);
140
+ }
141
add_qemu_ldst_label(s, 1, oi,
142
(is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
143
data_regl, data_regh, addr_regl, addr_regh,
144
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
145
} else {
146
tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
147
}
148
- a_bits = get_alignment_bits(opc);
149
- s_bits = opc & MO_SIZE;
150
- /*
151
- * R6 removes the left/right instructions but requires the
152
- * system to support misaligned memory accesses.
153
- */
154
if (use_mips32r6_instructions) {
155
if (a_bits) {
156
tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
157
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
158
}
159
}
160
161
-static void __attribute__((unused))
162
-tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
163
+static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
164
TCGReg base, MemOp opc)
165
{
166
const MIPSInsn sw1 = MIPS_BE ? OPC_SWL : OPC_SWR;
167
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
168
MemOp opc;
169
#if defined(CONFIG_SOFTMMU)
170
tcg_insn_unit *label_ptr[2];
171
-#else
172
- unsigned a_bits, s_bits;
173
#endif
174
+ unsigned a_bits, s_bits;
175
TCGReg base = TCG_REG_A0;
176
177
data_regl = *args++;
178
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
179
addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
180
oi = *args++;
181
opc = get_memop(oi);
182
+ a_bits = get_alignment_bits(opc);
183
+ s_bits = opc & MO_SIZE;
184
185
+ /*
186
+ * R6 removes the left/right instructions but requires the
187
+ * system to support misaligned memory accesses.
188
+ */
189
#if defined(CONFIG_SOFTMMU)
190
tcg_out_tlb_load(s, base, addr_regl, addr_regh, oi, label_ptr, 0);
191
- tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
192
+ if (use_mips32r6_instructions || a_bits >= s_bits) {
193
+ tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
194
+ } else {
195
+ tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc);
196
+ }
197
add_qemu_ldst_label(s, 0, oi,
198
(is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
199
data_regl, data_regh, addr_regl, addr_regh,
200
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
201
} else {
202
tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
203
}
204
- a_bits = get_alignment_bits(opc);
205
- s_bits = opc & MO_SIZE;
206
- /*
207
- * R6 removes the left/right instructions but requires the
208
- * system to support misaligned memory accesses.
209
- */
210
if (use_mips32r6_instructions) {
211
if (a_bits) {
212
tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
213
--
54
--
214
2.25.1
55
2.43.0
215
216
New patch

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 1)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_dup(OptContext *ctx, TCGOp *op)
--
2.43.0
New patch

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
         t = dup_const(TCGOP_VECE(op), t);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_dup2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
         op->opc = INDEX_op_dup_vec;
         TCGOP_VECE(op) = MO_32;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
--
2.43.0
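For context, an illustration that is not part of the series: fold_dup's constant path relies on dup_const replicating one element across the 64-bit value. The model below is a hypothetical re-implementation assuming that behaviour, with the element size given in bits rather than the MO_* encoding used by TCG.

#include <assert.h>
#include <stdint.h>

/* Hypothetical model of element replication, assumed to match what
 * dup_const does for the corresponding MO_* element size. */
static uint64_t dup_const_model(unsigned elem_bits, uint64_t val)
{
    uint64_t mask = elem_bits == 64 ? ~0ULL : (1ULL << elem_bits) - 1;
    uint64_t r = 0;

    for (unsigned i = 0; i < 64; i += elem_bits) {
        r |= (val & mask) << i;
    }
    return r;
}

int main(void)
{
    /* A dup_vec of a constant folds to a movi of the replicated value. */
    assert(dup_const_model(32, 0x12345678) == 0x1234567812345678ull);
    assert(dup_const_model(8, 0xab) == 0xababababababababull);
    return 0;
}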
1
A mostly generic test for unaligned access raising SIGBUS.
1
Add fold_masks_s as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
2
3
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
tests/tcg/multiarch/sigbus.c | 68 ++++++++++++++++++++++++++++++++++++
7
tcg/optimize.c | 13 ++++++++++---
7
1 file changed, 68 insertions(+)
8
1 file changed, 10 insertions(+), 3 deletions(-)
8
create mode 100644 tests/tcg/multiarch/sigbus.c
9
9
10
diff --git a/tests/tcg/multiarch/sigbus.c b/tests/tcg/multiarch/sigbus.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
new file mode 100644
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX
12
--- a/tcg/optimize.c
13
--- /dev/null
13
+++ b/tcg/optimize.c
14
+++ b/tests/tcg/multiarch/sigbus.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
15
@@ -XXX,XX +XXX,XX @@
15
return fold_masks_zs(ctx, op, z_mask, 0);
16
+#define _GNU_SOURCE 1
16
}
17
+
17
18
+#include <assert.h>
18
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
19
+#include <stdlib.h>
20
+#include <signal.h>
21
+#include <endian.h>
22
+
23
+
24
+unsigned long long x = 0x8877665544332211ull;
25
+void * volatile p = (void *)&x + 1;
26
+
27
+void sigbus(int sig, siginfo_t *info, void *uc)
28
+{
19
+{
29
+ assert(sig == SIGBUS);
20
+ return fold_masks_zs(ctx, op, -1, s_mask);
30
+ assert(info->si_signo == SIGBUS);
31
+#ifdef BUS_ADRALN
32
+ assert(info->si_code == BUS_ADRALN);
33
+#endif
34
+ assert(info->si_addr == p);
35
+ exit(EXIT_SUCCESS);
36
+}
21
+}
37
+
22
+
38
+int main()
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
39
+{
24
{
40
+ struct sigaction sa = {
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
41
+ .sa_sigaction = sigbus,
26
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
42
+ .sa_flags = SA_SIGINFO
27
43
+ };
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
44
+ int allow_fail = 0;
29
{
45
+ int tmp;
30
+ uint64_t s_mask;
46
+
31
+
47
+ tmp = sigaction(SIGBUS, &sa, NULL);
32
if (fold_const2_commutative(ctx, op) ||
48
+ assert(tmp == 0);
33
fold_xi_to_x(ctx, op, -1) ||
49
+
34
fold_xi_to_not(ctx, op, 0)) {
50
+ /*
35
return true;
51
+ * Select an operation that's likely to enforce alignment.
36
}
52
+ * On many guests that support unaligned accesses by default,
37
53
+ * this is often an atomic operation.
38
- ctx->s_mask = arg_info(op->args[1])->s_mask
54
+ */
39
- & arg_info(op->args[2])->s_mask;
55
+#if defined(__aarch64__)
40
- return false;
56
+ asm volatile("ldxr %w0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
41
+ s_mask = arg_info(op->args[1])->s_mask
57
+#elif defined(__alpha__)
42
+ & arg_info(op->args[2])->s_mask;
58
+ asm volatile("ldl_l %0,0(%1)" : "=r"(tmp) : "r"(p) : "memory");
43
+ return fold_masks_s(ctx, op, s_mask);
59
+#elif defined(__arm__)
44
}
60
+ asm volatile("ldrex %0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
45
61
+#elif defined(__powerpc__)
46
static bool fold_extract(OptContext *ctx, TCGOp *op)
62
+ asm volatile("lwarx %0,0,%1" : "=r"(tmp) : "r"(p) : "memory");
63
+#elif defined(__riscv_atomic)
64
+ asm volatile("lr.w %0,(%1)" : "=r"(tmp) : "r"(p) : "memory");
65
+#else
66
+ /* No insn known to fault unaligned -- try for a straight load. */
67
+ allow_fail = 1;
68
+ tmp = *(volatile int *)p;
69
+#endif
70
+
71
+ assert(allow_fail);
72
+
73
+ /*
74
+ * We didn't see a signal.
75
+ * We might as well validate the unaligned load worked.
76
+ */
77
+ if (BYTE_ORDER == LITTLE_ENDIAN) {
78
+ assert(tmp == 0x55443322);
79
+ } else {
80
+ assert(tmp == 0x77665544);
81
+ }
82
+ return EXIT_SUCCESS;
83
+}
84
--
47
--
85
2.25.1
48
2.43.0
86
87
New patch

Avoid the use of the OptContext slots. Find TempOptInfo once.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
 static bool fold_extract(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask_old, z_mask;
+    TempOptInfo *t1 = arg_info(op->args[1]);
     int pos = op->args[2];
     int len = op->args[3];
 
-    if (arg_is_const(op->args[1])) {
-        uint64_t t;
-
-        t = arg_info(op->args[1])->val;
-        t = extract64(t, pos, len);
-        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    if (ti_is_const(t1)) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0],
+                                extract64(ti_const_val(t1), pos, len));
     }
 
-    z_mask_old = arg_info(op->args[1])->z_mask;
+    z_mask_old = t1->z_mask;
     z_mask = extract64(z_mask_old, pos, len);
     if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
-    ctx->z_mask = z_mask;
 
-    return fold_masks(ctx, op);
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_extract2(OptContext *ctx, TCGOp *op)
--
2.43.0
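A quick illustration, outside the patch, of the two cases above: with a constant operand the whole extract folds to a movi, otherwise only the known-zero mask is narrowed. The extract64 below is a stand-in assumed to match QEMU's bitops helper.

#include <assert.h>
#include <stdint.h>

/* Stand-in assumed to match include/qemu/bitops.h extract64(). */
static uint64_t extract64(uint64_t value, int start, int length)
{
    return (value >> start) & (~0ULL >> (64 - length));
}

int main(void)
{
    /* Constant operand: fold_extract emits a movi of this value. */
    assert(extract64(0x1122334455667788ull, 8, 16) == 0x6677);

    /* Non-constant operand: the extracted z_mask bounds the result. */
    uint64_t z_mask_old = 0x0000ffffull;
    assert(extract64(z_mask_old, 4, 8) == 0xff);
    return 0;
}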
New patch

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
         }
         return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_exts(OptContext *ctx, TCGOp *op)
--
2.43.0
1
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Explicitly sign-extend z_mask instead of doing that manually.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/sparc/tcg-target.c.inc | 15 +++++++++++++++
7
tcg/optimize.c | 29 ++++++++++++-----------------
5
1 file changed, 15 insertions(+)
8
1 file changed, 12 insertions(+), 17 deletions(-)
6
9
7
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/sparc/tcg-target.c.inc
12
--- a/tcg/optimize.c
10
+++ b/tcg/sparc/tcg-target.c.inc
13
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
12
insn &= ~INSN_OFF19(-1);
15
13
insn |= INSN_OFF19(pcrel);
16
static bool fold_exts(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t s_mask_old, s_mask, z_mask, sign;
19
+ uint64_t s_mask_old, s_mask, z_mask;
20
bool type_change = false;
21
+ TempOptInfo *t1;
22
23
if (fold_const1(ctx, op)) {
24
return true;
25
}
26
27
- z_mask = arg_info(op->args[1])->z_mask;
28
- s_mask = arg_info(op->args[1])->s_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ z_mask = t1->z_mask;
31
+ s_mask = t1->s_mask;
32
s_mask_old = s_mask;
33
34
switch (op->opc) {
35
CASE_OP_32_64(ext8s):
36
- sign = INT8_MIN;
37
- z_mask = (uint8_t)z_mask;
38
+ s_mask |= INT8_MIN;
39
+ z_mask = (int8_t)z_mask;
14
break;
40
break;
15
+ case R_SPARC_13:
41
CASE_OP_32_64(ext16s):
16
+ if (!check_fit_ptr(value, 13)) {
42
- sign = INT16_MIN;
17
+ return false;
43
- z_mask = (uint16_t)z_mask;
18
+ }
44
+ s_mask |= INT16_MIN;
19
+ insn &= ~INSN_IMM13(-1);
45
+ z_mask = (int16_t)z_mask;
20
+ insn |= INSN_IMM13(value);
46
break;
21
+ break;
47
case INDEX_op_ext_i32_i64:
48
type_change = true;
49
QEMU_FALLTHROUGH;
50
case INDEX_op_ext32s_i64:
51
- sign = INT32_MIN;
52
- z_mask = (uint32_t)z_mask;
53
+ s_mask |= INT32_MIN;
54
+ z_mask = (int32_t)z_mask;
55
break;
22
default:
56
default:
23
g_assert_not_reached();
57
g_assert_not_reached();
24
}
58
}
25
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
59
26
return;
60
- if (z_mask & sign) {
61
- z_mask |= sign;
62
- }
63
- s_mask |= sign << 1;
64
-
65
- ctx->z_mask = z_mask;
66
- ctx->s_mask = s_mask;
67
if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
68
return true;
27
}
69
}
28
70
29
+ /* Use the constant pool, if possible. */
71
- return fold_masks(ctx, op);
30
+ if (!in_prologue && USE_REG_TB) {
72
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
31
+ new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
73
}
32
+ tcg_tbrel_diff(s, NULL));
74
33
+ tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
75
static bool fold_extu(OptContext *ctx, TCGOp *op)
34
+ return;
35
+ }
36
+
37
/* A 64-bit constant decomposed into 2 32-bit pieces. */
38
if (check_fit_i32(lo, 13)) {
39
hi = (arg - lo) >> 32;
40
--
76
--
41
2.25.1
77
2.43.0
42
43
New patch

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
 
-    ctx->z_mask = z_mask;
     if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
-    return fold_masks(ctx, op);
+
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_mb(OptContext *ctx, TCGOp *op)
--
2.43.0
New patch

Avoid the use of the OptContext slots. Find TempOptInfo once.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
 
 static bool fold_movcond(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask, s_mask;
+    TempOptInfo *tt, *ft;
     int i;
 
     /* If true and false values are the same, eliminate the cmp. */
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
     }
 
-    ctx->z_mask = arg_info(op->args[3])->z_mask
-                | arg_info(op->args[4])->z_mask;
-    ctx->s_mask = arg_info(op->args[3])->s_mask
-                & arg_info(op->args[4])->s_mask;
+    tt = arg_info(op->args[3]);
+    ft = arg_info(op->args[4]);
+    z_mask = tt->z_mask | ft->z_mask;
+    s_mask = tt->s_mask & ft->s_mask;
 
-    if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
-        uint64_t tv = arg_info(op->args[3])->val;
-        uint64_t fv = arg_info(op->args[4])->val;
+    if (ti_is_const(tt) && ti_is_const(ft)) {
+        uint64_t tv = ti_const_val(tt);
+        uint64_t fv = ti_const_val(ft);
         TCGOpcode opc, negopc = 0;
         TCGCond cond = op->args[5];
 
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
             }
         }
     }
-    return false;
+
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_mul(OptContext *ctx, TCGOp *op)
--
2.43.0
1
This will allow us to control exactly what scratch register is
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
used for loading the constant.
3
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
3
---
7
tcg/sparc/tcg-target.c.inc | 15 +++++++++------
4
tcg/optimize.c | 6 +++---
8
1 file changed, 9 insertions(+), 6 deletions(-)
5
1 file changed, 3 insertions(+), 3 deletions(-)
9
6
10
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/sparc/tcg-target.c.inc
9
--- a/tcg/optimize.c
13
+++ b/tcg/sparc/tcg-target.c.inc
10
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
11
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
15
}
17
}
16
18
17
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
19
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
18
- tcg_target_long arg, bool in_prologue)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
19
+ tcg_target_long arg, bool in_prologue,
21
fold_xi_to_i(ctx, op, 0)) {
20
+ TCGReg scratch)
22
return true;
21
{
22
tcg_target_long hi, lo = (int32_t)arg;
23
tcg_target_long test, lsb;
24
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
25
} else {
26
hi = arg >> 32;
27
tcg_out_movi_imm32(s, ret, hi);
28
- tcg_out_movi_imm32(s, TCG_REG_T2, lo);
29
+ tcg_out_movi_imm32(s, scratch, lo);
30
tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
31
- tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
32
+ tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
33
}
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
34
}
26
}
35
27
36
static void tcg_out_movi(TCGContext *s, TCGType type,
28
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
37
TCGReg ret, tcg_target_long arg)
29
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
38
{
30
tcg_opt_gen_movi(ctx, op2, rh, h);
39
- tcg_out_movi_int(s, type, ret, arg, false);
31
return true;
40
+ tcg_debug_assert(ret != TCG_REG_T2);
32
}
41
+ tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
33
- return false;
34
+ return finish_folding(ctx, op);
42
}
35
}
43
36
44
static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
37
static bool fold_nand(OptContext *ctx, TCGOp *op)
45
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
46
} else {
47
uintptr_t desti = (uintptr_t)dest;
48
tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
49
- desti & ~0xfff, in_prologue);
50
+ desti & ~0xfff, in_prologue, TCG_REG_O7);
51
tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
52
}
53
}
54
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
55
56
#ifndef CONFIG_SOFTMMU
57
if (guest_base != 0) {
58
- tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
59
+ tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
60
+ guest_base, true, TCG_REG_T1);
61
tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
62
}
63
#endif
64
--
38
--
65
2.25.1
39
2.43.0
66
67
New patch

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
 
 static bool fold_nand(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask;
+
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return false;
+    s_mask = arg_info(op->args[1])->s_mask
+           & arg_info(op->args[2])->s_mask;
+    return fold_masks_s(ctx, op, s_mask);
 }
 
 static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
--
2.43.0
New patch

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
 {
     /* Set to 1 all bits to the left of the rightmost. */
     uint64_t z_mask = arg_info(op->args[1])->z_mask;
-    ctx->z_mask = -(z_mask & -z_mask);
+    z_mask = -(z_mask & -z_mask);
 
-    /*
-     * Because of fold_sub_to_neg, we want to always return true,
-     * via finish_folding.
-     */
-    finish_folding(ctx, op);
-    return true;
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_neg(OptContext *ctx, TCGOp *op)
--
2.43.0
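The identity behind the comment above, "set to 1 all bits to the left of the rightmost", can be checked directly: if the input can only set bits at or above its lowest possible one-bit, its negation can never set anything below that bit. A small self-contained check follows, assuming the usual two's-complement wraparound; it is an illustration only.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t z_mask = 0x30;                 /* input may set only bits 4 and 5 */
    uint64_t neg_z = -(z_mask & -z_mask);   /* == 0xfffffffffffffff0 */

    assert(neg_z == 0xfffffffffffffff0ull);

    /* Negating any value drawn from z_mask stays inside neg_z. */
    for (uint64_t x = 0; x <= z_mask; x += 0x10) {
        assert(((0 - x) & ~neg_z) == 0);
    }
    return 0;
}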
New patch

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
 
 static bool fold_nor(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask;
+
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_not(ctx, op, 0)) {
         return true;
     }
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return false;
+    s_mask = arg_info(op->args[1])->s_mask
+           & arg_info(op->args[2])->s_mask;
+    return fold_masks_s(ctx, op, s_mask);
 }
 
 static bool fold_not(OptContext *ctx, TCGOp *op)
--
2.43.0
1
From: Idan Horowitz <idan.horowitz@gmail.com>
1
Avoid the use of the OptContext slots.
2
2
3
Instead of taking the lock of the cpu work list in order to check if it's
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
empty, we can just read the head pointer atomically. This decreases
5
cpu_work_list_empty's share from 5% to 1.3% in a profile of icount-enabled
6
aarch64-softmmu.
7
8
Signed-off-by: Idan Horowitz <idan.horowitz@gmail.com>
9
Message-Id: <20220114004358.299534-1-idan.horowitz@gmail.com>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
5
---
13
softmmu/cpus.c | 7 +------
6
tcg/optimize.c | 7 +------
14
1 file changed, 1 insertion(+), 6 deletions(-)
7
1 file changed, 1 insertion(+), 6 deletions(-)
15
8
16
diff --git a/softmmu/cpus.c b/softmmu/cpus.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
18
--- a/softmmu/cpus.c
11
--- a/tcg/optimize.c
19
+++ b/softmmu/cpus.c
12
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ bool cpu_is_stopped(CPUState *cpu)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
21
14
if (fold_const1(ctx, op)) {
22
bool cpu_work_list_empty(CPUState *cpu)
15
return true;
23
{
16
}
24
- bool ret;
25
-
17
-
26
- qemu_mutex_lock(&cpu->work_mutex);
18
- ctx->s_mask = arg_info(op->args[1])->s_mask;
27
- ret = QSIMPLEQ_EMPTY(&cpu->work_list);
19
-
28
- qemu_mutex_unlock(&cpu->work_mutex);
20
- /* Because of fold_to_not, we want to always return true, via finish. */
29
- return ret;
21
- finish_folding(ctx, op);
30
+ return QSIMPLEQ_EMPTY_ATOMIC(&cpu->work_list);
22
- return true;
23
+ return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
31
}
24
}
32
25
33
bool cpu_thread_is_idle(CPUState *cpu)
26
static bool fold_or(OptContext *ctx, TCGOp *op)
34
--
27
--
35
2.25.1
28
2.43.0
36
37
New patch

Avoid the use of the OptContext slots. Find TempOptInfo once.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
 
 static bool fold_or(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask, s_mask;
+    TempOptInfo *t1, *t2;
+
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_x(ctx, op, 0) ||
         fold_xx_to_x(ctx, op)) {
         return true;
     }
 
-    ctx->z_mask = arg_info(op->args[1])->z_mask
-                | arg_info(op->args[2])->z_mask;
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return fold_masks(ctx, op);
+    t1 = arg_info(op->args[1]);
+    t2 = arg_info(op->args[2]);
+    z_mask = t1->z_mask | t2->z_mask;
+    s_mask = t1->s_mask & t2->s_mask;
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_orc(OptContext *ctx, TCGOp *op)
--
2.43.0
New patch

Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
 
 static bool fold_orc(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask;
+
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, -1) ||
         fold_xi_to_x(ctx, op, -1) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
         return true;
     }
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return false;
+    s_mask = arg_info(op->args[1])->s_mask
+           & arg_info(op->args[2])->s_mask;
+    return fold_masks_s(ctx, op, s_mask);
 }
 
 static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
--
2.43.0
1
Due to mapping changes, we now rarely place the code_gen_buffer
1
Avoid the use of the OptContext slots.
2
near the main executable. Which means that direct calls will
3
now rarely be in range.
4
2
5
So, always use indirect calls for tail calls, which allows us to
3
Be careful not to call fold_masks_zs when the memory operation
6
avoid clobbering %o7, and therefore we need not save and restore it.
4
is wide enough to require multiple outputs, so split into two
5
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.
7
6
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
9
---
11
tcg/sparc/tcg-target.c.inc | 37 +++++++++++++++++++++++--------------
10
tcg/optimize.c | 26 +++++++++++++++++++++-----
12
1 file changed, 23 insertions(+), 14 deletions(-)
11
1 file changed, 21 insertions(+), 5 deletions(-)
13
12
14
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/sparc/tcg-target.c.inc
15
--- a/tcg/optimize.c
17
+++ b/tcg/sparc/tcg-target.c.inc
16
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
17
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
19
tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
18
return fold_masks_s(ctx, op, s_mask);
20
}
19
}
21
20
22
+static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
21
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
23
+ bool in_prologue, bool tail_call)
22
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
24
+{
23
{
25
+ uintptr_t desti = (uintptr_t)dest;
24
const TCGOpDef *def = &tcg_op_defs[op->opc];
25
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
26
MemOp mop = get_memop(oi);
27
int width = 8 * memop_size(mop);
28
+ uint64_t z_mask = -1, s_mask = 0;
29
30
if (width < 64) {
31
if (mop & MO_SIGN) {
32
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
33
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
34
} else {
35
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
36
+ z_mask = MAKE_64BIT_MASK(0, width);
37
}
38
}
39
40
/* Opcodes that touch guest memory stop the mb optimization. */
41
ctx->prev_mb = NULL;
42
- return false;
26
+
43
+
27
+ /* Be careful not to clobber %o7 for a tail call. */
44
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
28
+ tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
29
+ desti & ~0xfff, in_prologue,
30
+ tail_call ? TCG_REG_G2 : TCG_REG_O7);
31
+ tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
32
+ TCG_REG_T1, desti & 0xfff, JMPL);
33
+}
45
+}
34
+
46
+
35
static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
47
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
36
bool in_prologue)
48
+{
37
{
49
+ /* Opcodes that touch guest memory stop the mb optimization. */
38
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
50
+ ctx->prev_mb = NULL;
39
if (disp == (int32_t)disp) {
51
+ return finish_folding(ctx, op);
40
tcg_out32(s, CALL | (uint32_t)disp >> 2);
41
} else {
42
- uintptr_t desti = (uintptr_t)dest;
43
- tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
44
- desti & ~0xfff, in_prologue, TCG_REG_O7);
45
- tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
46
+ tcg_out_jmpl_const(s, dest, in_prologue, false);
47
}
48
}
52
}
49
53
50
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
54
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
51
55
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
52
/* Set the retaddr operand. */
56
break;
53
tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
57
case INDEX_op_qemu_ld_a32_i32:
54
- /* Set the env operand. */
58
case INDEX_op_qemu_ld_a64_i32:
55
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
59
+ done = fold_qemu_ld_1reg(&ctx, op);
56
/* Tail call. */
60
+ break;
57
- tcg_out_call_nodelay(s, qemu_ld_helpers[i], true);
61
case INDEX_op_qemu_ld_a32_i64:
58
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
62
case INDEX_op_qemu_ld_a64_i64:
59
+ tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
63
+ if (TCG_TARGET_REG_BITS == 64) {
60
+ /* delay slot -- set the env argument */
64
+ done = fold_qemu_ld_1reg(&ctx, op);
61
+ tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
65
+ break;
62
}
66
+ }
63
67
+ QEMU_FALLTHROUGH;
64
for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
68
case INDEX_op_qemu_ld_a32_i128:
65
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
69
case INDEX_op_qemu_ld_a64_i128:
66
if (ra >= TCG_REG_O6) {
70
- done = fold_qemu_ld(&ctx, op);
67
tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
71
+ done = fold_qemu_ld_2reg(&ctx, op);
68
TCG_TARGET_CALL_STACK_OFFSET);
72
break;
69
- ra = TCG_REG_G1;
73
case INDEX_op_qemu_st8_a32_i32:
70
+ } else {
74
case INDEX_op_qemu_st8_a64_i32:
71
+ tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
72
}
73
- tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
74
- /* Set the env operand. */
75
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
76
+
77
/* Tail call. */
78
- tcg_out_call_nodelay(s, qemu_st_helpers[i], true);
79
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
80
+ tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
81
+ /* delay slot -- set the env argument */
82
+ tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
83
}
84
}
85
#endif
86
--
75
--
87
2.25.1
76
2.43.0
88
89
1
This is now always true, since we require armv6.
1
Stores have no output operands, and so need no further work.
2
2
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
tcg/arm/tcg-target.h | 3 +--
6
tcg/optimize.c | 11 +++++------
7
tcg/arm/tcg-target.c.inc | 35 ++++++-----------------------------
7
1 file changed, 5 insertions(+), 6 deletions(-)
8
2 files changed, 7 insertions(+), 31 deletions(-)
9
8
10
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/arm/tcg-target.h
11
--- a/tcg/optimize.c
13
+++ b/tcg/arm/tcg-target.h
12
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@
13
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
15
14
{
16
extern int arm_arch;
15
/* Opcodes that touch guest memory stop the mb optimization. */
17
16
ctx->prev_mb = NULL;
18
-#define use_armv5t_instructions (__ARM_ARCH >= 5 || arm_arch >= 5)
17
- return false;
19
#define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6)
18
+ return true;
20
#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7)
21
22
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
23
#define TCG_TARGET_HAS_eqv_i32 0
24
#define TCG_TARGET_HAS_nand_i32 0
25
#define TCG_TARGET_HAS_nor_i32 0
26
-#define TCG_TARGET_HAS_clz_i32 use_armv5t_instructions
27
+#define TCG_TARGET_HAS_clz_i32 1
28
#define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions
29
#define TCG_TARGET_HAS_ctpop_i32 0
30
#define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions
31
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
32
index XXXXXXX..XXXXXXX 100644
33
--- a/tcg/arm/tcg-target.c.inc
34
+++ b/tcg/arm/tcg-target.c.inc
35
@@ -XXX,XX +XXX,XX @@ static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
36
* Unless the C portion of QEMU is compiled as thumb, we don't need
37
* true BX semantics; merely a branch to an address held in a register.
38
*/
39
- if (use_armv5t_instructions) {
40
- tcg_out_bx_reg(s, cond, rn);
41
- } else {
42
- tcg_out_mov_reg(s, cond, TCG_REG_PC, rn);
43
- }
44
+ tcg_out_bx_reg(s, cond, rn);
45
}
19
}
46
20
47
static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
21
static bool fold_remainder(OptContext *ctx, TCGOp *op)
48
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
22
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
23
24
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
25
remove_mem_copy_all(ctx);
26
- return false;
27
+ return true;
49
}
28
}
50
29
51
/* LDR is interworking from v5t. */
30
switch (op->opc) {
52
- if (arm_mode || use_armv5t_instructions) {
31
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
53
- tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
32
g_assert_not_reached();
54
- return;
33
}
55
- }
34
remove_mem_copy_in(ctx, ofs, ofs + lm1);
56
-
35
- return false;
57
- /* else v4t */
36
+ return true;
58
- tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
59
- tcg_out_bx_reg(s, COND_AL, TCG_REG_TMP);
60
+ tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
61
}
37
}
62
38
63
/*
39
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
64
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr)
40
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
65
if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
41
TCGType type;
66
if (arm_mode) {
42
67
tcg_out_bl_imm(s, COND_AL, disp);
43
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
68
- return;
44
- fold_tcg_st(ctx, op);
69
- }
45
- return false;
70
- if (use_armv5t_instructions) {
46
+ return fold_tcg_st(ctx, op);
71
+ } else {
72
tcg_out_blx_imm(s, disp);
73
- return;
74
}
75
+ return;
76
}
47
}
77
48
78
- if (use_armv5t_instructions) {
49
src = arg_temp(op->args[0]);
79
- tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
50
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
80
- tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
51
last = ofs + tcg_type_size(type) - 1;
81
- } else if (arm_mode) {
52
remove_mem_copy_in(ctx, ofs, last);
82
- /* ??? Know that movi_pool emits exactly 1 insn. */
53
record_mem_copy(ctx, type, src, ofs, last);
83
- tcg_out_mov_reg(s, COND_AL, TCG_REG_R14, TCG_REG_PC);
54
- return false;
84
- tcg_out_movi_pool(s, COND_AL, TCG_REG_PC, addri);
55
+ return true;
85
- } else {
86
- tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
87
- tcg_out_mov_reg(s, COND_AL, TCG_REG_R14, TCG_REG_PC);
88
- tcg_out_bx_reg(s, COND_AL, TCG_REG_TMP);
89
- }
90
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
91
+ tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
92
}
56
}
93
57
94
static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
58
static bool fold_xor(OptContext *ctx, TCGOp *op)
95
--
59
--
96
2.25.1
60
2.43.0
97
98
New patch

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
         fold_xx_to_i(ctx, op, 0)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
--
2.43.0
1
This is now always true, since we require armv6.
1
Change return from bool to int; distinguish between
2
complete folding, simplification, and no change.
2
3
3
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
tcg/arm/tcg-target.h | 1 -
7
tcg/optimize.c | 22 ++++++++++++++--------
7
tcg/arm/tcg-target.c.inc | 192 ++++++---------------------------------
8
1 file changed, 14 insertions(+), 8 deletions(-)
8
2 files changed, 27 insertions(+), 166 deletions(-)
9
9
10
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/arm/tcg-target.h
12
--- a/tcg/optimize.c
13
+++ b/tcg/arm/tcg-target.h
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
15
15
return finish_folding(ctx, op);
16
extern int arm_arch;
16
}
17
17
18
-#define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6)
18
-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
19
#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7)
19
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
20
20
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
21
#undef TCG_TARGET_STACK_GROWSUP
22
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
23
index XXXXXXX..XXXXXXX 100644
24
--- a/tcg/arm/tcg-target.c.inc
25
+++ b/tcg/arm/tcg-target.c.inc
26
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
27
static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
28
TCGReg rn, TCGReg rm)
29
{
21
{
30
- /* if ArchVersion() < 6 && d == n then UNPREDICTABLE; */
22
uint64_t a_zmask, b_val;
31
- if (!use_armv6_instructions && rd == rn) {
23
TCGCond cond;
32
- if (rd == rm) {
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
33
- /* rd == rn == rm; copy an input to tmp first. */
25
op->opc = xor_opc;
34
- tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
26
op->args[2] = arg_new_constant(ctx, 1);
35
- rm = rn = TCG_REG_TMP;
27
}
36
- } else {
28
- return false;
37
- rn = rm;
29
+ return -1;
38
- rm = rd;
30
}
39
- }
40
- }
41
/* mul */
42
tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
43
}
44
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
45
static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
46
TCGReg rd1, TCGReg rn, TCGReg rm)
47
{
48
- /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
49
- if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
50
- if (rd0 == rm || rd1 == rm) {
51
- tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
52
- rn = TCG_REG_TMP;
53
- } else {
54
- TCGReg t = rn;
55
- rn = rm;
56
- rm = t;
57
- }
58
- }
59
/* umull */
60
tcg_out32(s, (cond << 28) | 0x00800090 |
61
(rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
62
@@ -XXX,XX +XXX,XX @@ static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
63
static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
64
TCGReg rd1, TCGReg rn, TCGReg rm)
65
{
66
- /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
67
- if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
68
- if (rd0 == rm || rd1 == rm) {
69
- tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
70
- rn = TCG_REG_TMP;
71
- } else {
72
- TCGReg t = rn;
73
- rn = rm;
74
- rm = t;
75
- }
76
- }
77
/* smull */
78
tcg_out32(s, (cond << 28) | 0x00c00090 |
79
(rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
80
@@ -XXX,XX +XXX,XX @@ static void tcg_out_udiv(TCGContext *s, ARMCond cond,
81
82
static void tcg_out_ext8s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
83
{
84
- if (use_armv6_instructions) {
85
- /* sxtb */
86
- tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
87
- } else {
88
- tcg_out_dat_reg(s, cond, ARITH_MOV,
89
- rd, 0, rn, SHIFT_IMM_LSL(24));
90
- tcg_out_dat_reg(s, cond, ARITH_MOV,
91
- rd, 0, rd, SHIFT_IMM_ASR(24));
92
- }
93
+ /* sxtb */
94
+ tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
95
}
96
97
static void __attribute__((unused))
98
@@ -XXX,XX +XXX,XX @@ tcg_out_ext8u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
99
100
static void tcg_out_ext16s(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
101
{
102
- if (use_armv6_instructions) {
103
- /* sxth */
104
- tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
105
- } else {
106
- tcg_out_dat_reg(s, cond, ARITH_MOV,
107
- rd, 0, rn, SHIFT_IMM_LSL(16));
108
- tcg_out_dat_reg(s, cond, ARITH_MOV,
109
- rd, 0, rd, SHIFT_IMM_ASR(16));
110
- }
111
+ /* sxth */
112
+ tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
113
}
114
115
static void tcg_out_ext16u(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
116
{
117
- if (use_armv6_instructions) {
118
- /* uxth */
119
- tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
120
- } else {
121
- tcg_out_dat_reg(s, cond, ARITH_MOV,
122
- rd, 0, rn, SHIFT_IMM_LSL(16));
123
- tcg_out_dat_reg(s, cond, ARITH_MOV,
124
- rd, 0, rd, SHIFT_IMM_LSR(16));
125
- }
126
+ /* uxth */
127
+ tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
128
}
129
130
static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
131
TCGReg rd, TCGReg rn, int flags)
132
{
133
- if (use_armv6_instructions) {
134
- if (flags & TCG_BSWAP_OS) {
135
- /* revsh */
136
- tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
137
- return;
138
- }
139
-
140
- /* rev16 */
141
- tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
142
- if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
143
- /* uxth */
144
- tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
145
- }
146
+ if (flags & TCG_BSWAP_OS) {
147
+ /* revsh */
148
+ tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
149
return;
150
}
151
152
- if (flags == 0) {
153
- /*
154
- * For stores, no input or output extension:
155
- * rn = xxAB
156
- * lsr tmp, rn, #8 tmp = 0xxA
157
- * and tmp, tmp, #0xff tmp = 000A
158
- * orr rd, tmp, rn, lsl #8 rd = xABA
159
- */
160
- tcg_out_dat_reg(s, cond, ARITH_MOV,
161
- TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
162
- tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
163
- tcg_out_dat_reg(s, cond, ARITH_ORR,
164
- rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
165
- return;
166
+ /* rev16 */
167
+ tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
168
+ if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
169
+ /* uxth */
170
+ tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
171
}
31
}
172
-
32
-
173
- /*
33
- return false;
174
- * Byte swap, leaving the result at the top of the register.
34
+ return 0;
175
- * We will then shift down, zero or sign-extending.
176
- */
177
- if (flags & TCG_BSWAP_IZ) {
178
- /*
179
- * rn = 00AB
180
- * ror tmp, rn, #8 tmp = B00A
181
- * orr tmp, tmp, tmp, lsl #16 tmp = BA00
182
- */
183
- tcg_out_dat_reg(s, cond, ARITH_MOV,
184
- TCG_REG_TMP, 0, rn, SHIFT_IMM_ROR(8));
185
- tcg_out_dat_reg(s, cond, ARITH_ORR,
186
- TCG_REG_TMP, TCG_REG_TMP, TCG_REG_TMP,
187
- SHIFT_IMM_LSL(16));
188
- } else {
189
- /*
190
- * rn = xxAB
191
- * and tmp, rn, #0xff00 tmp = 00A0
192
- * lsl tmp, tmp, #8 tmp = 0A00
193
- * orr tmp, tmp, rn, lsl #24 tmp = BA00
194
- */
195
- tcg_out_dat_rI(s, cond, ARITH_AND, TCG_REG_TMP, rn, 0xff00, 1);
196
- tcg_out_dat_reg(s, cond, ARITH_MOV,
197
- TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSL(8));
198
- tcg_out_dat_reg(s, cond, ARITH_ORR,
199
- TCG_REG_TMP, TCG_REG_TMP, rn, SHIFT_IMM_LSL(24));
200
- }
201
- tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, TCG_REG_TMP,
202
- (flags & TCG_BSWAP_OS
203
- ? SHIFT_IMM_ASR(8) : SHIFT_IMM_LSR(8)));
204
}
35
}
205
36
206
static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
37
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
207
{
38
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
208
- if (use_armv6_instructions) {
39
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
209
- /* rev */
210
- tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
211
- } else {
212
- tcg_out_dat_reg(s, cond, ARITH_EOR,
213
- TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
214
- tcg_out_dat_imm(s, cond, ARITH_BIC,
215
- TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
216
- tcg_out_dat_reg(s, cond, ARITH_MOV,
217
- rd, 0, rn, SHIFT_IMM_ROR(8));
218
- tcg_out_dat_reg(s, cond, ARITH_EOR,
219
- rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
220
- }
221
+ /* rev */
222
+ tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
223
}
224
225
static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
226
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
227
{
228
if (use_armv7_instructions) {
229
tcg_out32(s, INSN_DMB_ISH);
230
- } else if (use_armv6_instructions) {
231
+ } else {
232
tcg_out32(s, INSN_DMB_MCR);
233
}
40
}
234
}
41
235
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
42
- if (fold_setcond_zmask(ctx, op, false)) {
236
if (argreg & 1) {
43
+ i = fold_setcond_zmask(ctx, op, false);
237
argreg++;
44
+ if (i > 0) {
45
return true;
238
}
46
}
239
- if (use_armv6_instructions && argreg >= 4
47
- fold_setcond_tst_pow2(ctx, op, false);
240
- && (arglo & 1) == 0 && arghi == arglo + 1) {
48
+ if (i == 0) {
241
+ if (argreg >= 4 && (arglo & 1) == 0 && arghi == arglo + 1) {
49
+ fold_setcond_tst_pow2(ctx, op, false);
242
tcg_out_strd_8(s, COND_AL, arglo,
50
+ }
243
TCG_REG_CALL_STACK, (argreg - 4) * 4);
51
244
return argreg + 2;
52
ctx->z_mask = 1;
245
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
53
return false;
246
int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
54
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
247
: offsetof(CPUTLBEntry, addr_write));
55
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
248
int fast_off = TLB_MASK_TABLE_OFS(mem_index);
249
- int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
250
- int table_off = fast_off + offsetof(CPUTLBDescFast, table);
251
unsigned s_bits = opc & MO_SIZE;
252
unsigned a_bits = get_alignment_bits(opc);
253
254
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
255
}
56
}
256
57
257
/* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */
58
- if (fold_setcond_zmask(ctx, op, true)) {
258
- if (use_armv6_instructions) {
59
+ i = fold_setcond_zmask(ctx, op, true);
259
- tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
60
+ if (i > 0) {
260
- } else {
61
return true;
261
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R0, TCG_AREG0, mask_off);
262
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R1, TCG_AREG0, table_off);
263
- }
264
+ tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
265
266
/* Extract the tlb index from the address into R0. */
267
tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
268
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
269
* Load the tlb comparator into R2/R3 and the fast path addend into R1.
270
*/
271
if (cmp_off == 0) {
272
- if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
273
+ if (TARGET_LONG_BITS == 64) {
274
tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
275
} else {
276
tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
277
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
278
} else {
279
tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
280
TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
281
- if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
282
+ if (TARGET_LONG_BITS == 64) {
283
tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
284
} else {
285
tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
286
}
287
}
62
}
288
- if (!use_armv6_instructions && TARGET_LONG_BITS == 64) {
63
- fold_setcond_tst_pow2(ctx, op, true);
289
- tcg_out_ld32_12(s, COND_AL, TCG_REG_R3, TCG_REG_R1, cmp_off + 4);
64
+ if (i == 0) {
290
- }
65
+ fold_setcond_tst_pow2(ctx, op, true);
291
66
+ }
292
/* Load the tlb addend. */
67
293
tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
68
/* Value is {0,-1} so all bits are repetitions of the sign. */
294
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
69
ctx->s_mask = -1;
295
TCGReg argreg, datalo, datahi;
296
MemOpIdx oi = lb->oi;
297
MemOp opc = get_memop(oi);
298
- void *func;
299
300
if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
301
return false;
302
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
303
argreg = tcg_out_arg_imm32(s, argreg, oi);
304
argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
305
306
- /* For armv6 we can use the canonical unsigned helpers and minimize
307
- icache usage. For pre-armv6, use the signed helpers since we do
308
- not have a single insn sign-extend. */
309
- if (use_armv6_instructions) {
310
- func = qemu_ld_helpers[opc & MO_SIZE];
311
- } else {
312
- func = qemu_ld_helpers[opc & MO_SSIZE];
313
- if (opc & MO_SIGN) {
314
- opc = MO_UL;
315
- }
316
- }
317
- tcg_out_call(s, func);
318
+ /* Use the canonical unsigned helpers and minimize icache usage. */
319
+ tcg_out_call(s, qemu_ld_helpers[opc & MO_SIZE]);
320
321
datalo = lb->datalo_reg;
322
datahi = lb->datahi_reg;
323
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
324
break;
325
case MO_UQ:
326
/* Avoid ldrd for user-only emulation, to handle unaligned. */
327
- if (USING_SOFTMMU && use_armv6_instructions
328
+ if (USING_SOFTMMU
329
&& (datalo & 1) == 0 && datahi == datalo + 1) {
330
tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
331
} else if (datalo != addend) {
332
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
333
break;
334
case MO_UQ:
335
/* Avoid ldrd for user-only emulation, to handle unaligned. */
336
- if (USING_SOFTMMU && use_armv6_instructions
337
+ if (USING_SOFTMMU
338
&& (datalo & 1) == 0 && datahi == datalo + 1) {
339
tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
340
} else if (datalo == addrlo) {
341
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
342
break;
343
case MO_64:
344
/* Avoid strd for user-only emulation, to handle unaligned. */
345
- if (USING_SOFTMMU && use_armv6_instructions
346
+ if (USING_SOFTMMU
347
&& (datalo & 1) == 0 && datahi == datalo + 1) {
348
tcg_out_strd_r(s, cond, datalo, addrlo, addend);
349
} else {
350
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
351
break;
352
case MO_64:
353
/* Avoid strd for user-only emulation, to handle unaligned. */
354
- if (USING_SOFTMMU && use_armv6_instructions
355
+ if (USING_SOFTMMU
356
&& (datalo & 1) == 0 && datahi == datalo + 1) {
357
tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
358
} else {
359
--
70
--
360
2.25.1
71
2.43.0
361
362
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
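For context, a minimal sketch of the helper this and the following
patches switch to.  This is an assumed shape, not the actual QEMU
definition, inferred by analogy with the fold_masks_s() wrapper that
appears as context later in the series:

    /* Assumed: record only a known-zeros mask, then finish folding. */
    static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
    {
        return fold_masks_zs(ctx, op, z_mask, 0);
    }

With that, returning fold_masks_z(ctx, op, 1) replaces the two-step
"ctx->z_mask = 1; return false;" pattern removed below.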
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
14
fold_setcond_tst_pow2(ctx, op, false);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
}
21
22
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
14
}
15
16
/* Value is {0,-1} so all bits are repetitions of the sign. */
17
- ctx->s_mask = -1;
18
- return false;
19
+ return fold_masks_s(ctx, op, -1);
20
}
21
22
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
14
return fold_setcond(ctx, op);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
21
do_setcond_const:
22
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
23
--
24
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
13
op->args[3] = tcg_swap_cond(op->args[3]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
13
op->args[5] = tcg_invert_cond(op->args[5]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_sextract(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
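A standalone check of the new sign-mask arithmetic (illustration only;
it assumes the series' new s_mask convention, in which all bits from
the sign position upward are set):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t s_mask_old = 0;        /* nothing known about the input */
        int pos = 0, len = 8;
        uint64_t s_mask = s_mask_old >> pos;

        s_mask |= -1ull << (len - 1);   /* set bits 7..63 */
        printf("%016llx\n", (unsigned long long)s_mask);
        /* prints ffffffffffffff80, the same pattern as INT8_MIN */
        return 0;
    }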
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 24 +++++++++---------------
7
1 file changed, 9 insertions(+), 15 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
14
static bool fold_sextract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask, s_mask, s_mask_old;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = sextract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ sextract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask = arg_info(op->args[1])->z_mask;
33
- z_mask = sextract64(z_mask, pos, len);
34
- ctx->z_mask = z_mask;
35
-
36
- s_mask_old = arg_info(op->args[1])->s_mask;
37
- s_mask = sextract64(s_mask_old, pos, len);
38
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
39
- ctx->s_mask = s_mask;
40
+ s_mask_old = t1->s_mask;
41
+ s_mask = s_mask_old >> pos;
42
+ s_mask |= -1ull << (len - 1);
43
44
if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
45
return true;
46
}
47
48
- return fold_masks(ctx, op);
49
+ z_mask = sextract64(t1->z_mask, pos, len);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_shift(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
diff view generated by jsdifflib
1
Reserve a register for guest_base, using the aarch64 backend as a reference.
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
By doing so, we do not have to recompute it for every memory load.
3
2
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/arm/tcg-target.c.inc | 39 ++++++++++++++++++++++++++++-----------
6
tcg/optimize.c | 27 ++++++++++++++-------------
8
1 file changed, 28 insertions(+), 11 deletions(-)
7
1 file changed, 14 insertions(+), 13 deletions(-)
9
8
10
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/arm/tcg-target.c.inc
11
--- a/tcg/optimize.c
13
+++ b/tcg/arm/tcg-target.c.inc
12
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_call_oarg_regs[2] = {
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
15
14
static bool fold_shift(OptContext *ctx, TCGOp *op)
16
#define TCG_REG_TMP TCG_REG_R12
17
#define TCG_VEC_TMP TCG_REG_Q15
18
+#ifndef CONFIG_SOFTMMU
19
+#define TCG_REG_GUEST_BASE TCG_REG_R11
20
+#endif
21
22
typedef enum {
23
COND_EQ = 0x0,
24
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
25
26
static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
27
TCGReg datalo, TCGReg datahi,
28
- TCGReg addrlo, TCGReg addend)
29
+ TCGReg addrlo, TCGReg addend,
30
+ bool scratch_addend)
31
{
15
{
32
/* Byte swapping is left to middle-end expansion. */
16
uint64_t s_mask, z_mask, sign;
33
tcg_debug_assert((opc & MO_BSWAP) == 0);
17
+ TempOptInfo *t1, *t2;
34
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
18
35
if (get_alignment_bits(opc) >= MO_64
19
if (fold_const2(ctx, op) ||
36
&& (datalo & 1) == 0 && datahi == datalo + 1) {
20
fold_ix_to_i(ctx, op, 0) ||
37
tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
21
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
38
- } else if (datalo != addend) {
22
return true;
39
+ } else if (scratch_addend) {
40
tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo);
41
tcg_out_ld32_12(s, COND_AL, datahi, addend, 4);
42
} else {
43
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
44
label_ptr = s->code_ptr;
45
tcg_out_bl_imm(s, COND_NE, 0);
46
47
- tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);
48
+ tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true);
49
50
add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
51
s->code_ptr, label_ptr);
52
#else /* !CONFIG_SOFTMMU */
53
if (guest_base) {
54
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
55
- tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP);
56
+ tcg_out_qemu_ld_index(s, opc, datalo, datahi,
57
+ addrlo, TCG_REG_GUEST_BASE, false);
58
} else {
59
tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
60
}
23
}
61
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
24
62
25
- s_mask = arg_info(op->args[1])->s_mask;
63
static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
26
- z_mask = arg_info(op->args[1])->z_mask;
64
TCGReg datalo, TCGReg datahi,
27
+ t1 = arg_info(op->args[1]);
65
- TCGReg addrlo, TCGReg addend)
28
+ t2 = arg_info(op->args[2]);
66
+ TCGReg addrlo, TCGReg addend,
29
+ s_mask = t1->s_mask;
67
+ bool scratch_addend)
30
+ z_mask = t1->z_mask;
68
{
31
69
/* Byte swapping is left to middle-end expansion. */
32
- if (arg_is_const(op->args[2])) {
70
tcg_debug_assert((opc & MO_BSWAP) == 0);
33
- int sh = arg_info(op->args[2])->val;
71
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
34
-
72
if (get_alignment_bits(opc) >= MO_64
35
- ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
73
&& (datalo & 1) == 0 && datahi == datalo + 1) {
36
+ if (ti_is_const(t2)) {
74
tcg_out_strd_r(s, cond, datalo, addrlo, addend);
37
+ int sh = ti_const_val(t2);
75
- } else {
38
76
+ } else if (scratch_addend) {
39
+ z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
77
tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
40
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
78
tcg_out_st32_12(s, cond, datahi, addend, 4);
41
79
+ } else {
42
- return fold_masks(ctx, op);
80
+ tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP,
43
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
81
+ addend, addrlo, SHIFT_IMM_LSL(0));
44
}
82
+ tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0);
45
83
+ tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4);
46
switch (op->opc) {
47
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
48
* Arithmetic right shift will not reduce the number of
49
* input sign repetitions.
50
*/
51
- ctx->s_mask = s_mask;
52
- break;
53
+ return fold_masks_s(ctx, op, s_mask);
54
CASE_OP_32_64(shr):
55
/*
56
* If the sign bit is known zero, then logical right shift
57
- * will not reduced the number of input sign repetitions.
58
+ * will not reduce the number of input sign repetitions.
59
*/
60
- sign = (s_mask & -s_mask) >> 1;
61
+ sign = -s_mask;
62
if (sign && !(z_mask & sign)) {
63
- ctx->s_mask = s_mask;
64
+ return fold_masks_s(ctx, op, s_mask);
84
}
65
}
85
break;
66
break;
86
default:
67
default:
87
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
68
break;
88
mem_index = get_mmuidx(oi);
89
addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);
90
91
- tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend);
92
+ tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi,
93
+ addrlo, addend, true);
94
95
/* The conditional call must come last, as we're going to return here. */
96
label_ptr = s->code_ptr;
97
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
98
s->code_ptr, label_ptr);
99
#else /* !CONFIG_SOFTMMU */
100
if (guest_base) {
101
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base);
102
- tcg_out_qemu_st_index(s, COND_AL, opc, datalo,
103
- datahi, addrlo, TCG_REG_TMP);
104
+ tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi,
105
+ addrlo, TCG_REG_GUEST_BASE, false);
106
} else {
107
tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
108
}
69
}
109
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
70
110
71
- return false;
111
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
72
+ return finish_folding(ctx, op);
112
73
}
113
+#ifndef CONFIG_SOFTMMU
74
114
+ if (guest_base) {
75
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
115
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
116
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
117
+ }
118
+#endif
119
+
120
tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
121
122
/*
123
--
76
--
124
2.25.1
77
2.43.0
125
126
diff view generated by jsdifflib
1
Since 7ecd02a06f8, if patch_reloc fails we restart translation
1
Merge the two conditions, sign != 0 && !(z_mask & sign),
2
with a smaller TB. SPARC had its function signature changed,
2
by testing ~z_mask & sign. If sign == 0, the logical AND
3
but not the logic. Replace assert with return false.
3
will produce false.
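A small worked example (mine, not from the patch): with the new s_mask
representation the known-sign bits form a run ending at bit 63, so for
s_mask = 0xffffffffffffff80 we get sign = -s_mask = 0x80, a single bit.
The old test asked sign != 0 && (z_mask & 0x80) == 0; the new test
(~z_mask & 0x80) != 0 asks the same question, and for s_mask == 0 it
yields ~z_mask & 0 == 0, i.e. false, exactly as before.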
4
4
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
7
---
9
tcg/sparc/tcg-target.c.inc | 8 ++++++--
8
tcg/optimize.c | 5 ++---
10
1 file changed, 6 insertions(+), 2 deletions(-)
9
1 file changed, 2 insertions(+), 3 deletions(-)
11
10
12
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/sparc/tcg-target.c.inc
13
--- a/tcg/optimize.c
15
+++ b/tcg/sparc/tcg-target.c.inc
14
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type,
15
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
17
16
18
switch (type) {
17
static bool fold_shift(OptContext *ctx, TCGOp *op)
19
case R_SPARC_WDISP16:
18
{
20
- assert(check_fit_ptr(pcrel >> 2, 16));
19
- uint64_t s_mask, z_mask, sign;
21
+ if (!check_fit_ptr(pcrel >> 2, 16)) {
20
+ uint64_t s_mask, z_mask;
22
+ return false;
21
TempOptInfo *t1, *t2;
23
+ }
22
24
insn &= ~INSN_OFF16(-1);
23
if (fold_const2(ctx, op) ||
25
insn |= INSN_OFF16(pcrel);
24
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
26
break;
25
* If the sign bit is known zero, then logical right shift
27
case R_SPARC_WDISP19:
26
* will not reduce the number of input sign repetitions.
28
- assert(check_fit_ptr(pcrel >> 2, 19));
27
*/
29
+ if (!check_fit_ptr(pcrel >> 2, 19)) {
28
- sign = -s_mask;
30
+ return false;
29
- if (sign && !(z_mask & sign)) {
31
+ }
30
+ if (~z_mask & -s_mask) {
32
insn &= ~INSN_OFF19(-1);
31
return fold_masks_s(ctx, op, s_mask);
33
insn |= INSN_OFF19(pcrel);
32
}
34
break;
33
break;
35
--
34
--
36
2.25.1
35
2.43.0
37
38
diff view generated by jsdifflib
New patch
1
Duplicate fold_sub_vec into fold_sub instead of calling it,
2
now that fold_sub_vec always returns true.
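A sketch of why a plain call no longer works (paraphrasing the code
below, not new behaviour): fold_sub used to read

    if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
        return true;
    }
    /* ... canonicalise sub x,c into add x,-c ... */

and once fold_sub_vec ends in finish_folding() it always returns true,
so the sub-to-add canonicalisation would become unreachable; hence the
three checks are copied inline instead.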
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 9 ++++++---
8
1 file changed, 6 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
15
fold_sub_to_neg(ctx, op)) {
16
return true;
17
}
18
- return false;
19
+ return finish_folding(ctx, op);
20
}
21
22
static bool fold_sub(OptContext *ctx, TCGOp *op)
23
{
24
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
25
+ if (fold_const2(ctx, op) ||
26
+ fold_xx_to_i(ctx, op, 0) ||
27
+ fold_xi_to_x(ctx, op, 0) ||
28
+ fold_sub_to_neg(ctx, op)) {
29
return true;
30
}
31
32
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
33
? INDEX_op_add_i32 : INDEX_op_add_i64);
34
op->args[2] = arg_new_constant(ctx, -val);
35
}
36
- return false;
37
+ return finish_folding(ctx, op);
38
}
39
40
static bool fold_sub2(OptContext *ctx, TCGOp *op)
41
--
42
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
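For reference, how the old and new sign masks line up (my tabulation,
assuming the new s_mask convention also covers the sign bit itself):

    ld8s : MAKE_64BIT_MASK(8, 56)  = 0xffffffffffffff00 -> INT8_MIN  = 0xffffffffffffff80
    ld16s: MAKE_64BIT_MASK(16, 48) = 0xffffffffffff0000 -> INT16_MIN = 0xffffffffffff8000
    ld32s: MAKE_64BIT_MASK(32, 32) = 0xffffffff00000000 -> INT32_MIN = 0xffffffff80000000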
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 16 +++++++++-------
7
1 file changed, 9 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask = -1, s_mask = 0;
18
+
19
/* We can't do any folding with a load, but we can record bits. */
20
switch (op->opc) {
21
CASE_OP_32_64(ld8s):
22
- ctx->s_mask = MAKE_64BIT_MASK(8, 56);
23
+ s_mask = INT8_MIN;
24
break;
25
CASE_OP_32_64(ld8u):
26
- ctx->z_mask = MAKE_64BIT_MASK(0, 8);
27
+ z_mask = MAKE_64BIT_MASK(0, 8);
28
break;
29
CASE_OP_32_64(ld16s):
30
- ctx->s_mask = MAKE_64BIT_MASK(16, 48);
31
+ s_mask = INT16_MIN;
32
break;
33
CASE_OP_32_64(ld16u):
34
- ctx->z_mask = MAKE_64BIT_MASK(0, 16);
35
+ z_mask = MAKE_64BIT_MASK(0, 16);
36
break;
37
case INDEX_op_ld32s_i64:
38
- ctx->s_mask = MAKE_64BIT_MASK(32, 32);
39
+ s_mask = INT32_MIN;
40
break;
41
case INDEX_op_ld32u_i64:
42
- ctx->z_mask = MAKE_64BIT_MASK(0, 32);
43
+ z_mask = MAKE_64BIT_MASK(0, 32);
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
51
52
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
12
TCGType type;
13
14
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
type = ctx->type;
20
--
21
2.43.0
diff view generated by jsdifflib
1
Support for unaligned accesses is difficult for pre-v6 hosts.
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
While Debian still builds for armv4, we cannot use a compile
2
Remove fold_masks as the function becomes unused.
3
time test, so test the architecture at runtime and error out.
4
3
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/arm/tcg-target.c.inc | 5 +++++
7
tcg/optimize.c | 18 ++++++++----------
9
1 file changed, 5 insertions(+)
8
1 file changed, 8 insertions(+), 10 deletions(-)
10
9
11
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/arm/tcg-target.c.inc
12
--- a/tcg/optimize.c
14
+++ b/tcg/arm/tcg-target.c.inc
13
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
16
if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
15
return fold_masks_zs(ctx, op, -1, s_mask);
17
arm_arch = pl[1] - '0';
16
}
18
}
17
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
19
-{
20
- return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
21
-}
22
-
23
/*
24
* An "affected" mask bit is 0 if and only if the result is identical
25
* to the first input. Thus if the entire mask is 0, the operation
26
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
27
28
static bool fold_xor(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask, s_mask;
31
+ TempOptInfo *t1, *t2;
19
+
32
+
20
+ if (arm_arch < 6) {
33
if (fold_const2_commutative(ctx, op) ||
21
+ error_report("TCG: ARMv%d is unsupported; exiting", arm_arch);
34
fold_xx_to_i(ctx, op, 0) ||
22
+ exit(EXIT_FAILURE);
35
fold_xi_to_x(ctx, op, 0) ||
23
+ }
36
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
37
return true;
24
}
38
}
25
39
26
tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
40
- ctx->z_mask = arg_info(op->args[1])->z_mask
41
- | arg_info(op->args[2])->z_mask;
42
- ctx->s_mask = arg_info(op->args[1])->s_mask
43
- & arg_info(op->args[2])->s_mask;
44
- return fold_masks(ctx, op);
45
+ t1 = arg_info(op->args[1]);
46
+ t2 = arg_info(op->args[2]);
47
+ z_mask = t1->z_mask | t2->z_mask;
48
+ s_mask = t1->s_mask & t2->s_mask;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
51
52
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
27
--
53
--
28
2.25.1
54
2.43.0
29
30
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
12
return fold_orc(ctx, op);
13
}
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
/* Propagate constants and copies, fold constant expressions. */
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
All non-default cases now finish folding within each function.
2
Do the same for the default case, and assert afterwards that it was done.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 6 ++----
8
1 file changed, 2 insertions(+), 4 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
15
done = true;
16
break;
17
default:
18
+ done = finish_folding(&ctx, op);
19
break;
20
}
21
-
22
- if (!done) {
23
- finish_folding(&ctx, op);
24
- }
25
+ tcg_debug_assert(done);
26
}
27
}
28
--
29
2.43.0
diff view generated by jsdifflib
New patch
1
All mask setting is now done with parameters via fold_masks_*.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 -------------
7
1 file changed, 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
14
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
15
16
/* In flight values from optimization. */
17
- uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
18
- uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
19
TCGType type;
20
} OptContext;
21
22
@@ -XXX,XX +XXX,XX @@ static bool finish_folding(OptContext *ctx, TCGOp *op)
23
for (i = 0; i < nb_oargs; i++) {
24
TCGTemp *ts = arg_temp(op->args[i]);
25
reset_ts(ctx, ts);
26
- /*
27
- * Save the corresponding known-zero/sign bits mask for the
28
- * first output argument (only one supported so far).
29
- */
30
- if (i == 0) {
31
- ts_info(ts)->z_mask = ctx->z_mask;
32
- }
33
}
34
return true;
35
}
36
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
37
ctx.type = TCG_TYPE_I32;
38
}
39
40
- /* Assume all bits affected, no bits known zero, no sign reps. */
41
- ctx.z_mask = -1;
42
- ctx.s_mask = 0;
43
-
44
/*
45
* Process each opcode.
46
* Sorted alphabetically by opcode as much as possible.
47
--
48
2.43.0
diff view generated by jsdifflib
1
We had code to check for 13 and 21-bit shifted constants,
1
All instances of s_mask have been converted to the new
2
but we can do better and allow 32-bit shifted constants.
2
representation. We can now re-enable usage.
3
This is still 2 insns shorter than the full 64-bit sequence.
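For instance (illustration only; register choice and exact encodings
aside), 0x7fffffff00000000 can now be emitted as roughly

    sethi  %hi(0x7fffffff), %o0
    or     %o0, %lo(0x7fffffff), %o0
    sllx   %o0, 32, %o0

instead of building both 32-bit halves separately and merging them.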
4
3
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
6
---
9
tcg/sparc/tcg-target.c.inc | 12 ++++++------
7
tcg/optimize.c | 4 ++--
10
1 file changed, 6 insertions(+), 6 deletions(-)
8
1 file changed, 2 insertions(+), 2 deletions(-)
11
9
12
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/sparc/tcg-target.c.inc
12
--- a/tcg/optimize.c
15
+++ b/tcg/sparc/tcg-target.c.inc
13
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
17
return;
15
g_assert_not_reached();
18
}
16
}
19
17
20
- /* A 21-bit constant, shifted. */
18
- if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
21
+ /* A 32-bit constant, shifted. */
19
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
22
lsb = ctz64(arg);
20
return true;
23
test = (tcg_target_long)arg >> lsb;
24
- if (check_fit_tl(test, 13)) {
25
- tcg_out_movi_imm13(s, ret, test);
26
- tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
27
- return;
28
- } else if (lsb > 10 && test == extract64(test, 0, 21)) {
29
+ if (lsb > 10 && test == extract64(test, 0, 21)) {
30
tcg_out_sethi(s, ret, test << 10);
31
tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
32
return;
33
+ } else if (test == (uint32_t)test || test == (int32_t)test) {
34
+ tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
35
+ tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
36
+ return;
37
}
21
}
38
22
39
/* A 64-bit constant decomposed into 2 32-bit pieces. */
23
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
24
s_mask = s_mask_old >> pos;
25
s_mask |= -1ull << (len - 1);
26
27
- if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
28
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
29
return true;
30
}
31
40
--
32
--
41
2.25.1
33
2.43.0
42
43
diff view generated by jsdifflib
1
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
1
The big comment just above says functions should be sorted.
2
Add forward declarations as needed.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/i386/tcg-target.h | 2 -
7
tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
5
tcg/i386/tcg-target.c.inc | 103 ++++++++++++++++++++++++++++++++++++--
8
1 file changed, 59 insertions(+), 55 deletions(-)
6
2 files changed, 98 insertions(+), 7 deletions(-)
7
9
8
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/i386/tcg-target.h
12
--- a/tcg/optimize.c
11
+++ b/tcg/i386/tcg-target.h
13
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
13
15
* 3) those that produce information about the result value.
14
#define TCG_TARGET_HAS_MEMORY_BSWAP have_movbe
15
16
-#ifdef CONFIG_SOFTMMU
17
#define TCG_TARGET_NEED_LDST_LABELS
18
-#endif
19
#define TCG_TARGET_NEED_POOL_LABELS
20
21
#endif
22
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
23
index XXXXXXX..XXXXXXX 100644
24
--- a/tcg/i386/tcg-target.c.inc
25
+++ b/tcg/i386/tcg-target.c.inc
26
@@ -XXX,XX +XXX,XX @@
27
* THE SOFTWARE.
28
*/
16
*/
29
17
30
+#include "../tcg-ldst.c.inc"
18
+static bool fold_or(OptContext *ctx, TCGOp *op);
31
#include "../tcg-pool.c.inc"
19
+static bool fold_orc(OptContext *ctx, TCGOp *op);
32
20
+static bool fold_xor(OptContext *ctx, TCGOp *op);
33
#ifdef CONFIG_DEBUG_TCG
21
+
34
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
22
static bool fold_add(OptContext *ctx, TCGOp *op)
35
#define OPC_VZEROUPPER (0x77 | P_EXT)
23
{
36
#define OPC_XCHG_ax_r32    (0x90)
24
if (fold_const2_commutative(ctx, op) ||
37
25
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
38
-#define OPC_GRP3_Ev    (0xf7)
26
return fold_masks_zs(ctx, op, z_mask, s_mask);
39
-#define OPC_GRP5    (0xff)
40
+#define OPC_GRP3_Eb (0xf6)
41
+#define OPC_GRP3_Ev (0xf7)
42
+#define OPC_GRP5 (0xff)
43
#define OPC_GRP14 (0x73 | P_EXT | P_DATA16)
44
45
/* Group 1 opcode extensions for 0x80-0x83.
46
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
47
#define SHIFT_SAR 7
48
49
/* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */
50
+#define EXT3_TESTi 0
51
#define EXT3_NOT 2
52
#define EXT3_NEG 3
53
#define EXT3_MUL 4
54
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nopn(TCGContext *s, int n)
55
}
27
}
56
28
57
#if defined(CONFIG_SOFTMMU)
29
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
58
-#include "../tcg-ldst.c.inc"
59
-
60
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
61
* int mmu_idx, uintptr_t ra)
62
*/
63
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
64
tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
65
return true;
66
}
67
-#elif TCG_TARGET_REG_BITS == 32
68
+#else
69
+
70
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
71
+ TCGReg addrhi, unsigned a_bits)
72
+{
30
+{
73
+ unsigned a_mask = (1 << a_bits) - 1;
31
+ /* If true and false values are the same, eliminate the cmp. */
74
+ TCGLabelQemuLdst *label;
32
+ if (args_are_copies(op->args[2], op->args[3])) {
75
+
33
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
76
+ /*
77
+ * We are expecting a_bits to max out at 7, so we can usually use testb.
78
+ * For i686, we have to use testl for %esi/%edi.
79
+ */
80
+ if (a_mask <= 0xff && (TCG_TARGET_REG_BITS == 64 || addrlo < 4)) {
81
+ tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, addrlo);
82
+ tcg_out8(s, a_mask);
83
+ } else {
84
+ tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, addrlo);
85
+ tcg_out32(s, a_mask);
86
+ }
34
+ }
87
+
35
+
88
+ /* jne slow_path */
36
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
89
+ tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
37
+ uint64_t tv = arg_info(op->args[2])->val;
38
+ uint64_t fv = arg_info(op->args[3])->val;
90
+
39
+
91
+ label = new_ldst_label(s);
40
+ if (tv == -1 && fv == 0) {
92
+ label->is_ld = is_ld;
41
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
93
+ label->addrlo_reg = addrlo;
42
+ }
94
+ label->addrhi_reg = addrhi;
43
+ if (tv == 0 && fv == -1) {
95
+ label->raddr = tcg_splitwx_to_rx(s->code_ptr + 4);
44
+ if (TCG_TARGET_HAS_not_vec) {
96
+ label->label_ptr[0] = s->code_ptr;
45
+ op->opc = INDEX_op_not_vec;
97
+
46
+ return fold_not(ctx, op);
98
+ s->code_ptr += 4;
47
+ } else {
48
+ op->opc = INDEX_op_xor_vec;
49
+ op->args[2] = arg_new_constant(ctx, -1);
50
+ return fold_xor(ctx, op);
51
+ }
52
+ }
53
+ }
54
+ if (arg_is_const(op->args[2])) {
55
+ uint64_t tv = arg_info(op->args[2])->val;
56
+ if (tv == -1) {
57
+ op->opc = INDEX_op_or_vec;
58
+ op->args[2] = op->args[3];
59
+ return fold_or(ctx, op);
60
+ }
61
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
62
+ op->opc = INDEX_op_andc_vec;
63
+ op->args[2] = op->args[1];
64
+ op->args[1] = op->args[3];
65
+ return fold_andc(ctx, op);
66
+ }
67
+ }
68
+ if (arg_is_const(op->args[3])) {
69
+ uint64_t fv = arg_info(op->args[3])->val;
70
+ if (fv == 0) {
71
+ op->opc = INDEX_op_and_vec;
72
+ return fold_and(ctx, op);
73
+ }
74
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
75
+ op->opc = INDEX_op_orc_vec;
76
+ op->args[2] = op->args[1];
77
+ op->args[1] = op->args[3];
78
+ return fold_orc(ctx, op);
79
+ }
80
+ }
81
+ return finish_folding(ctx, op);
99
+}
82
+}
100
+
83
+
101
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
102
+{
85
{
103
+ /* resolve label address */
86
int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
104
+ tcg_patch32(l->label_ptr[0], s->code_ptr - l->label_ptr[0] - 4);
87
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
105
+
88
return fold_masks_zs(ctx, op, z_mask, s_mask);
106
+ if (TCG_TARGET_REG_BITS == 32) {
107
+ int ofs = 0;
108
+
109
+ tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
110
+ ofs += 4;
111
+
112
+ tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
113
+ ofs += 4;
114
+ if (TARGET_LONG_BITS == 64) {
115
+ tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
116
+ ofs += 4;
117
+ }
118
+
119
+ tcg_out_pushi(s, (uintptr_t)l->raddr);
120
+ } else {
121
+ tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
122
+ l->addrlo_reg);
123
+ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
124
+
125
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RAX, (uintptr_t)l->raddr);
126
+ tcg_out_push(s, TCG_REG_RAX);
127
+ }
128
+
129
+ /* "Tail call" to the helper, with the return address back inline. */
130
+ tcg_out_jmp(s, (const void *)(l->is_ld ? helper_unaligned_ld
131
+ : helper_unaligned_st));
132
+ return true;
133
+}
134
+
135
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
136
+{
137
+ return tcg_out_fail_alignment(s, l);
138
+}
139
+
140
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
141
+{
142
+ return tcg_out_fail_alignment(s, l);
143
+}
144
+
145
+#if TCG_TARGET_REG_BITS == 32
146
# define x86_guest_base_seg 0
147
# define x86_guest_base_index -1
148
# define x86_guest_base_offset guest_base
149
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)
150
return 0;
151
}
89
}
152
# endif
90
153
+#endif
91
-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
154
#endif /* SOFTMMU */
92
-{
155
93
- /* If true and false values are the same, eliminate the cmp. */
156
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
94
- if (args_are_copies(op->args[2], op->args[3])) {
157
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
95
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
158
#if defined(CONFIG_SOFTMMU)
96
- }
159
int mem_index;
97
-
160
tcg_insn_unit *label_ptr[2];
98
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
161
+#else
99
- uint64_t tv = arg_info(op->args[2])->val;
162
+ unsigned a_bits;
100
- uint64_t fv = arg_info(op->args[3])->val;
163
#endif
101
-
164
102
- if (tv == -1 && fv == 0) {
165
datalo = *args++;
103
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
166
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
104
- }
167
add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi,
105
- if (tv == 0 && fv == -1) {
168
s->code_ptr, label_ptr);
106
- if (TCG_TARGET_HAS_not_vec) {
169
#else
107
- op->opc = INDEX_op_not_vec;
170
+ a_bits = get_alignment_bits(opc);
108
- return fold_not(ctx, op);
171
+ if (a_bits) {
109
- } else {
172
+ tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
110
- op->opc = INDEX_op_xor_vec;
173
+ }
111
- op->args[2] = arg_new_constant(ctx, -1);
174
+
112
- return fold_xor(ctx, op);
175
tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
113
- }
176
x86_guest_base_offset, x86_guest_base_seg,
114
- }
177
is64, opc);
115
- }
178
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
116
- if (arg_is_const(op->args[2])) {
179
#if defined(CONFIG_SOFTMMU)
117
- uint64_t tv = arg_info(op->args[2])->val;
180
int mem_index;
118
- if (tv == -1) {
181
tcg_insn_unit *label_ptr[2];
119
- op->opc = INDEX_op_or_vec;
182
+#else
120
- op->args[2] = op->args[3];
183
+ unsigned a_bits;
121
- return fold_or(ctx, op);
184
#endif
122
- }
185
123
- if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
186
datalo = *args++;
124
- op->opc = INDEX_op_andc_vec;
187
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
125
- op->args[2] = op->args[1];
188
add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi,
126
- op->args[1] = op->args[3];
189
s->code_ptr, label_ptr);
127
- return fold_andc(ctx, op);
190
#else
128
- }
191
+ a_bits = get_alignment_bits(opc);
129
- }
192
+ if (a_bits) {
130
- if (arg_is_const(op->args[3])) {
193
+ tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
131
- uint64_t fv = arg_info(op->args[3])->val;
194
+ }
132
- if (fv == 0) {
195
+
133
- op->opc = INDEX_op_and_vec;
196
tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
134
- return fold_and(ctx, op);
197
x86_guest_base_offset, x86_guest_base_seg, opc);
135
- }
198
#endif
136
- if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
137
- op->opc = INDEX_op_orc_vec;
138
- op->args[2] = op->args[1];
139
- op->args[1] = op->args[3];
140
- return fold_orc(ctx, op);
141
- }
142
- }
143
- return finish_folding(ctx, op);
144
-}
145
-
146
/* Propagate constants and copies, fold constant expressions. */
147
void tcg_optimize(TCGContext *s)
148
{
199
--
149
--
200
2.25.1
150
2.43.0
201
202
diff view generated by jsdifflib
1
The big comment just above says functions should be sorted.
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
---
5
---
3
tcg/riscv/tcg-target.h | 2 --
6
tcg/optimize.c | 60 +++++++++++++++++++++++++-------------------------
4
tcg/riscv/tcg-target.c.inc | 63 ++++++++++++++++++++++++++++++++++++--
7
1 file changed, 30 insertions(+), 30 deletions(-)
5
2 files changed, 61 insertions(+), 4 deletions(-)
6
8
7
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/riscv/tcg-target.h
11
--- a/tcg/optimize.c
10
+++ b/tcg/riscv/tcg-target.h
12
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
13
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
12
13
#define TCG_TARGET_DEFAULT_MO (0)
14
15
-#ifdef CONFIG_SOFTMMU
16
#define TCG_TARGET_NEED_LDST_LABELS
17
-#endif
18
#define TCG_TARGET_NEED_POOL_LABELS
19
20
#define TCG_TARGET_HAS_MEMORY_BSWAP 0
21
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
22
index XXXXXXX..XXXXXXX 100644
23
--- a/tcg/riscv/tcg-target.c.inc
24
+++ b/tcg/riscv/tcg-target.c.inc
25
@@ -XXX,XX +XXX,XX @@
26
* THE SOFTWARE.
27
*/
28
29
+#include "../tcg-ldst.c.inc"
30
#include "../tcg-pool.c.inc"
31
32
#ifdef CONFIG_DEBUG_TCG
33
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
34
*/
35
36
#if defined(CONFIG_SOFTMMU)
37
-#include "../tcg-ldst.c.inc"
38
-
39
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
40
* MemOpIdx oi, uintptr_t ra)
41
*/
42
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
43
tcg_out_goto(s, l->raddr);
44
return true;
14
return true;
45
}
15
}
46
+#else
16
47
+
17
+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
48
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
49
+ unsigned a_bits)
50
+{
18
+{
51
+ unsigned a_mask = (1 << a_bits) - 1;
19
+ /* Canonicalize the comparison to put immediate second. */
52
+ TCGLabelQemuLdst *l = new_ldst_label(s);
20
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
53
+
21
+ op->args[3] = tcg_swap_cond(op->args[3]);
54
+ l->is_ld = is_ld;
22
+ }
55
+ l->addrlo_reg = addr_reg;
23
+ return finish_folding(ctx, op);
56
+
57
+ /* We are expecting a_bits to max out at 7, so we can always use andi. */
58
+ tcg_debug_assert(a_bits < 12);
59
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
60
+
61
+ l->label_ptr[0] = s->code_ptr;
62
+ tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
63
+
64
+ l->raddr = tcg_splitwx_to_rx(s->code_ptr);
65
+}
24
+}
66
+
25
+
67
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
26
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
68
+{
27
+{
69
+ /* resolve label address */
28
+ /* If true and false values are the same, eliminate the cmp. */
70
+ if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
29
+ if (args_are_copies(op->args[3], op->args[4])) {
71
+ return false;
30
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
72
+ }
31
+ }
73
+
32
+
74
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
33
+ /* Canonicalize the comparison to put immediate second. */
75
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
34
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
76
+
35
+ op->args[5] = tcg_swap_cond(op->args[5]);
77
+ /* tail call, with the return address back inline. */
36
+ }
78
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
37
+ /*
79
+ tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
38
+ * Canonicalize the "false" input reg to match the destination,
80
+ : helper_unaligned_st), true);
39
+ * so that the tcg backend can implement "move if true".
81
+ return true;
40
+ */
41
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
42
+ op->args[5] = tcg_invert_cond(op->args[5]);
43
+ }
44
+ return finish_folding(ctx, op);
82
+}
45
+}
83
+
46
+
84
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
47
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
85
+{
48
{
86
+ return tcg_out_fail_alignment(s, l);
49
uint64_t z_mask, s_mask;
87
+}
50
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
88
+
51
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
89
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
52
}
90
+{
53
91
+ return tcg_out_fail_alignment(s, l);
54
-static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
92
+}
55
-{
93
+
56
- /* Canonicalize the comparison to put immediate second. */
94
#endif /* CONFIG_SOFTMMU */
57
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
95
58
- op->args[3] = tcg_swap_cond(op->args[3]);
96
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
59
- }
97
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
60
- return finish_folding(ctx, op);
98
MemOp opc;
61
-}
99
#if defined(CONFIG_SOFTMMU)
62
-
100
tcg_insn_unit *label_ptr[1];
63
-static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
101
+#else
64
-{
102
+ unsigned a_bits;
65
- /* If true and false values are the same, eliminate the cmp. */
103
#endif
66
- if (args_are_copies(op->args[3], op->args[4])) {
104
TCGReg base = TCG_REG_TMP0;
67
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
105
68
- }
106
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
69
-
107
tcg_out_ext32u(s, base, addr_regl);
70
- /* Canonicalize the comparison to put immediate second. */
108
addr_regl = base;
71
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
109
}
72
- op->args[5] = tcg_swap_cond(op->args[5]);
110
+ a_bits = get_alignment_bits(opc);
73
- }
111
+ if (a_bits) {
74
- /*
112
+ tcg_out_test_alignment(s, true, addr_regl, a_bits);
75
- * Canonicalize the "false" input reg to match the destination,
113
+ }
76
- * so that the tcg backend can implement "move if true".
114
if (guest_base != 0) {
77
- */
115
tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
78
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
116
}
79
- op->args[5] = tcg_invert_cond(op->args[5]);
117
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
80
- }
118
MemOp opc;
81
- return finish_folding(ctx, op);
119
#if defined(CONFIG_SOFTMMU)
82
-}
120
tcg_insn_unit *label_ptr[1];
83
-
121
+#else
84
static bool fold_sextract(OptContext *ctx, TCGOp *op)
122
+ unsigned a_bits;
85
{
123
#endif
86
uint64_t z_mask, s_mask, s_mask_old;
124
TCGReg base = TCG_REG_TMP0;
125
126
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
127
tcg_out_ext32u(s, base, addr_regl);
128
addr_regl = base;
129
}
130
+ a_bits = get_alignment_bits(opc);
131
+ if (a_bits) {
132
+ tcg_out_test_alignment(s, false, addr_regl, a_bits);
133
+ }
134
if (guest_base != 0) {
135
tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
136
}
137
--
87
--
138
2.25.1
88
2.43.0
139
140
diff view generated by jsdifflib
1
From: WANG Xuerui <git@xen0n.name>
1
We currently have a flag, float_muladd_halve_result, to scale
2
2
the result by 2**-1. Extend this to handle arbitrary scaling.
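A rough usage sketch (my reading of the intended semantics, not part of
the patch; a, b, c and the float_status pointer s are placeholders):
the scale argument multiplies the fused result by 2**scale before the
single final rounding, so the existing halve-result flag corresponds to
scale == -1:

    float32 r_old = float32_muladd(a, b, c, float_muladd_halve_result, s);
    float32 r_new = float32_muladd_scalbn(a, b, c, -1, 0, s);
    /* r_old and r_new should agree for all inputs, given that reading. */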
3
Signed-off-by: WANG Xuerui <git@xen0n.name>
3
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Message-Id: <20220106134238.3936163-1-git@xen0n.name>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/loongarch64/tcg-target.h | 2 -
7
include/fpu/softfloat.h | 6 ++++
9
tcg/loongarch64/tcg-target.c.inc | 71 +++++++++++++++++++++++++++++++-
8
fpu/softfloat.c | 58 ++++++++++++++++++++++-----------------
10
2 files changed, 69 insertions(+), 4 deletions(-)
9
fpu/softfloat-parts.c.inc | 7 +++--
11
10
3 files changed, 44 insertions(+), 27 deletions(-)
12
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
11
12
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
13
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/loongarch64/tcg-target.h
14
--- a/include/fpu/softfloat.h
15
+++ b/tcg/loongarch64/tcg-target.h
15
+++ b/include/fpu/softfloat.h
16
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
16
@@ -XXX,XX +XXX,XX @@ float16 float16_add(float16, float16, float_status *status);
17
17
float16 float16_sub(float16, float16, float_status *status);
18
#define TCG_TARGET_DEFAULT_MO (0)
18
float16 float16_mul(float16, float16, float_status *status);
19
19
float16 float16_muladd(float16, float16, float16, int, float_status *status);
20
-#ifdef CONFIG_SOFTMMU
20
+float16 float16_muladd_scalbn(float16, float16, float16,
21
#define TCG_TARGET_NEED_LDST_LABELS
21
+ int, int, float_status *status);
22
-#endif
22
float16 float16_div(float16, float16, float_status *status);
23
23
float16 float16_scalbn(float16, int, float_status *status);
24
#define TCG_TARGET_HAS_MEMORY_BSWAP 0
24
float16 float16_min(float16, float16, float_status *status);
25
25
@@ -XXX,XX +XXX,XX @@ float32 float32_mul(float32, float32, float_status *status);
26
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
26
float32 float32_div(float32, float32, float_status *status);
27
float32 float32_rem(float32, float32, float_status *status);
28
float32 float32_muladd(float32, float32, float32, int, float_status *status);
29
+float32 float32_muladd_scalbn(float32, float32, float32,
30
+ int, int, float_status *status);
31
float32 float32_sqrt(float32, float_status *status);
32
float32 float32_exp2(float32, float_status *status);
33
float32 float32_log2(float32, float_status *status);
34
@@ -XXX,XX +XXX,XX @@ float64 float64_mul(float64, float64, float_status *status);
35
float64 float64_div(float64, float64, float_status *status);
36
float64 float64_rem(float64, float64, float_status *status);
37
float64 float64_muladd(float64, float64, float64, int, float_status *status);
38
+float64 float64_muladd_scalbn(float64, float64, float64,
39
+ int, int, float_status *status);
40
float64 float64_sqrt(float64, float_status *status);
41
float64 float64_log2(float64, float_status *status);
42
FloatRelation float64_compare(float64, float64, float_status *status);
43
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
27
index XXXXXXX..XXXXXXX 100644
44
index XXXXXXX..XXXXXXX 100644
28
--- a/tcg/loongarch64/tcg-target.c.inc
45
--- a/fpu/softfloat.c
29
+++ b/tcg/loongarch64/tcg-target.c.inc
46
+++ b/fpu/softfloat.c
30
@@ -XXX,XX +XXX,XX @@
47
@@ -XXX,XX +XXX,XX @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
31
* THE SOFTWARE.
48
#define parts_mul(A, B, S) \
49
PARTS_GENERIC_64_128(mul, A)(A, B, S)
50
51
-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
52
- FloatParts64 *c, int flags,
53
- float_status *s);
54
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
55
- FloatParts128 *c, int flags,
56
- float_status *s);
57
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
58
+ FloatParts64 *c, int scale,
59
+ int flags, float_status *s);
60
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
61
+ FloatParts128 *c, int scale,
62
+ int flags, float_status *s);
63
64
-#define parts_muladd(A, B, C, Z, S) \
65
- PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
66
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
67
+ PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)
68
69
static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
70
float_status *s);
71
@@ -XXX,XX +XXX,XX @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
72
* Fused multiply-add
32
*/
73
*/
33
74
34
+#include "../tcg-ldst.c.inc"
75
-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
35
+
76
- int flags, float_status *status)
36
#ifdef CONFIG_DEBUG_TCG
77
+float16 QEMU_FLATTEN
37
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
78
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
38
"zero",
79
+ int scale, int flags, float_status *status)
39
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
80
{
40
*/
81
FloatParts64 pa, pb, pc, *pr;
41
82
42
#if defined(CONFIG_SOFTMMU)
83
float16_unpack_canonical(&pa, a, status);
43
-#include "../tcg-ldst.c.inc"
84
float16_unpack_canonical(&pb, b, status);
44
-
85
float16_unpack_canonical(&pc, c, status);
45
/*
86
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
46
* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
87
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
47
* MemOpIdx oi, uintptr_t ra)
88
48
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
89
return float16_round_pack_canonical(pr, status);
49
90
}
50
return tcg_out_goto(s, l->raddr);
91
51
}
92
-static float32 QEMU_SOFTFLOAT_ATTR
52
+#else
93
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
53
+
94
- float_status *status)
54
+/*
95
+float16 float16_muladd(float16 a, float16 b, float16 c,
55
+ * Alignment helpers for user-mode emulation
96
+ int flags, float_status *status)
56
+ */
57
+
58
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
59
+ unsigned a_bits)
60
+{
97
+{
61
+ TCGLabelQemuLdst *l = new_ldst_label(s);
98
+ return float16_muladd_scalbn(a, b, c, 0, flags, status);
62
+
63
+ l->is_ld = is_ld;
64
+ l->addrlo_reg = addr_reg;
65
+
66
+ /*
67
+ * Without micro-architecture details, we don't know which of bstrpick or
68
+ * andi is faster, so use bstrpick as it's not constrained by imm field
69
+ * width. (Not to say alignments >= 2^12 are going to happen any time
70
+ * soon, though)
71
+ */
72
+ tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);
73
+
74
+ l->label_ptr[0] = s->code_ptr;
75
+ tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
76
+
77
+ l->raddr = tcg_splitwx_to_rx(s->code_ptr);
78
+}
99
+}
79
+
100
+
80
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
101
+float32 QEMU_SOFTFLOAT_ATTR
81
+{
102
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
82
+ /* resolve label address */
103
+ int scale, int flags, float_status *status)
83
+ if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
104
{
84
+ return false;
105
FloatParts64 pa, pb, pc, *pr;
85
+ }
106
107
float32_unpack_canonical(&pa, a, status);
108
float32_unpack_canonical(&pb, b, status);
109
float32_unpack_canonical(&pc, c, status);
110
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
111
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
112
113
return float32_round_pack_canonical(pr, status);
114
}
115
116
-static float64 QEMU_SOFTFLOAT_ATTR
117
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
118
- float_status *status)
119
+float64 QEMU_SOFTFLOAT_ATTR
120
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
121
+ int scale, int flags, float_status *status)
122
{
123
FloatParts64 pa, pb, pc, *pr;
124
125
float64_unpack_canonical(&pa, a, status);
126
float64_unpack_canonical(&pb, b, status);
127
float64_unpack_canonical(&pc, c, status);
128
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
129
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
130
131
return float64_round_pack_canonical(pr, status);
132
}
133
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
134
return ur.s;
135
136
soft:
137
- return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
138
+ return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
139
}
140
141
float64 QEMU_FLATTEN
142
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
143
return ur.s;
144
145
soft:
146
- return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
147
+ return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
148
}
149
150
float64 float64r32_muladd(float64 a, float64 b, float64 c,
151
@@ -XXX,XX +XXX,XX @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
152
float64_unpack_canonical(&pa, a, status);
153
float64_unpack_canonical(&pb, b, status);
154
float64_unpack_canonical(&pc, c, status);
155
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
156
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
157
158
return float64r32_round_pack_canonical(pr, status);
159
}
160
@@ -XXX,XX +XXX,XX @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
161
bfloat16_unpack_canonical(&pa, a, status);
162
bfloat16_unpack_canonical(&pb, b, status);
163
bfloat16_unpack_canonical(&pc, c, status);
164
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
165
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
166
167
return bfloat16_round_pack_canonical(pr, status);
168
}
169
@@ -XXX,XX +XXX,XX @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
170
float128_unpack_canonical(&pa, a, status);
171
float128_unpack_canonical(&pb, b, status);
172
float128_unpack_canonical(&pc, c, status);
173
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
174
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
175
176
return float128_round_pack_canonical(pr, status);
177
}
178
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
179
180
float64_unpack_canonical(&rp, float64_one, status);
181
for (i = 0 ; i < 15 ; i++) {
86
+
182
+
87
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
183
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
88
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
184
- rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
89
+
185
+ rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
90
+ /* tail call, with the return address back inline. */
186
xnp = *parts_mul(&xnp, &xp, status);
91
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
187
}
92
+ tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
188
93
+ : helper_unaligned_st), true);
189
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
94
+ return true;
190
index XXXXXXX..XXXXXXX 100644
95
+}
191
--- a/fpu/softfloat-parts.c.inc
96
+
192
+++ b/fpu/softfloat-parts.c.inc
97
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
193
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
98
+{
194
* Requires A and C extracted into a double-sized structure to provide the
99
+ return tcg_out_fail_alignment(s, l);
195
* extra space for the widening multiply.
100
+}
196
*/
101
+
197
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
102
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
198
- FloatPartsN *c, int flags, float_status *s)
103
+{
199
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
104
+ return tcg_out_fail_alignment(s, l);
200
+ FloatPartsN *c, int scale,
105
+}
201
+ int flags, float_status *s)
106
+
202
{
107
#endif /* CONFIG_SOFTMMU */
203
int ab_mask, abc_mask;
108
204
FloatPartsW p_widen, c_widen;
109
/*
205
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
110
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
206
a->exp = p_widen.exp;
111
MemOp opc;
207
112
#if defined(CONFIG_SOFTMMU)
208
return_normal:
113
tcg_insn_unit *label_ptr[1];
209
+ /* TODO: Replace all use of float_muladd_halve_result with scale. */
114
+#else
210
if (flags & float_muladd_halve_result) {
115
+ unsigned a_bits;
211
a->exp -= 1;
116
#endif
212
}
117
TCGReg base;
213
+ a->exp += scale;
118
214
finish_sign:
119
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
215
if (flags & float_muladd_negate_result) {
120
data_regl, addr_regl,
216
a->sign ^= 1;
121
s->code_ptr, label_ptr);
122
#else
123
+ a_bits = get_alignment_bits(opc);
124
+ if (a_bits) {
125
+ tcg_out_test_alignment(s, true, addr_regl, a_bits);
126
+ }
127
base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
128
TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
129
tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type);
130
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
131
MemOp opc;
132
#if defined(CONFIG_SOFTMMU)
133
tcg_insn_unit *label_ptr[1];
134
+#else
135
+ unsigned a_bits;
136
#endif
137
TCGReg base;
138
139
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
140
data_regl, addr_regl,
141
s->code_ptr, label_ptr);
142
#else
143
+ a_bits = get_alignment_bits(opc);
144
+ if (a_bits) {
145
+ tcg_out_test_alignment(s, false, addr_regl, a_bits);
146
+ }
147
base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
148
TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
149
tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc);
150
--
217
--
151
2.25.1
218
2.43.0
152
219
153
220
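As a rough illustration of the new interface (a sketch, not part of the patch): float*_muladd_scalbn computes (a * b + c) * 2**scale with only the one final rounding, so a caller can fold a power-of-two scale into the fused multiply-add. The function name and operands below are made up; only the float32_muladd_scalbn signature is taken from the patch, and 'st' stands for an already-initialised float_status.

    /* Sketch only: fused (a * b + c) * 2^-3 in a single rounding step. */
    static float32 muladd_scaled_example(float32 a, float32 b, float32 c,
                                         float_status *st)
    {
        return float32_muladd_scalbn(a, b, c, -3, 0, st);
    }

    /* Passing scale = 0 gives exactly the old float32_muladd behaviour. */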
1
Sparc64 is unique on linux in *not* passing ucontext_t as
1
Use the scalbn interface instead of float_muladd_halve_result.
2
the third argument to a SA_SIGINFO handler. It passes the
3
old struct sigcontext instead.
4
2
5
Set both pc and npc in host_signal_set_pc.
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
7
Fixes: 8b5bd461935b ("linux-user/host/sparc: Populate host_signal.h")
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
5
---
11
linux-user/include/host/sparc64/host-signal.h | 17 +++++++++--------
6
target/arm/tcg/helper-a64.c | 6 +++---
12
1 file changed, 9 insertions(+), 8 deletions(-)
7
1 file changed, 3 insertions(+), 3 deletions(-)
13
8
14
diff --git a/linux-user/include/host/sparc64/host-signal.h b/linux-user/include/host/sparc64/host-signal.h
9
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
15
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
16
--- a/linux-user/include/host/sparc64/host-signal.h
11
--- a/target/arm/tcg/helper-a64.c
17
+++ b/linux-user/include/host/sparc64/host-signal.h
12
+++ b/target/arm/tcg/helper-a64.c
18
@@ -XXX,XX +XXX,XX @@
13
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst)
19
#ifndef SPARC64_HOST_SIGNAL_H
14
(float16_is_infinity(b) && float16_is_zero(a))) {
20
#define SPARC64_HOST_SIGNAL_H
15
return float16_one_point_five;
21
16
}
22
-/* FIXME: the third argument to a SA_SIGINFO handler is *not* ucontext_t. */
17
- return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
23
-typedef ucontext_t host_sigcontext;
18
+ return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);
24
+/* The third argument to a SA_SIGINFO handler is struct sigcontext. */
25
+typedef struct sigcontext host_sigcontext;
26
27
-static inline uintptr_t host_signal_pc(host_sigcontext *uc)
28
+static inline uintptr_t host_signal_pc(host_sigcontext *sc)
29
{
30
- return uc->uc_mcontext.mc_gregs[MC_PC];
31
+ return sc->sigc_regs.tpc;
32
}
19
}
33
20
34
-static inline void host_signal_set_pc(host_sigcontext *uc, uintptr_t pc)
21
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
35
+static inline void host_signal_set_pc(host_sigcontext *sc, uintptr_t pc)
22
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
36
{
23
(float32_is_infinity(b) && float32_is_zero(a))) {
37
- uc->uc_mcontext.mc_gregs[MC_PC] = pc;
24
return float32_one_point_five;
38
+ sc->sigc_regs.tpc = pc;
25
}
39
+ sc->sigc_regs.tnpc = pc + 4;
26
- return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
27
+ return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst);
40
}
28
}
41
29
42
-static inline void *host_signal_mask(host_sigcontext *uc)
30
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
43
+static inline void *host_signal_mask(host_sigcontext *sc)
31
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
44
{
32
(float64_is_infinity(b) && float64_is_zero(a))) {
45
- return &uc->uc_sigmask;
33
return float64_one_point_five;
46
+ return &sc->sigc_mask;
34
}
35
- return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
36
+ return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
47
}
37
}
48
38
49
static inline bool host_signal_write(siginfo_t *info, host_sigcontext *uc)
39
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
50
--
40
--
51
2.25.1
41
2.43.0
52
42
53
43
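The call shape above can be read as follows (illustrative only, not a statement about the ARM pseudocode): the former float_muladd_halve_result flag is replaced by a scale of -1, so the helper still evaluates (a * b + 3) / 2 with a single rounding. The function name is hypothetical; float32_three and float32_muladd_scalbn are the real softfloat symbols used in the diff, and 'st' is an assumed float_status.

    /* Sketch: the rsqrt step after this patch; same value as the old
     * halve_result form. */
    static float32 rsqrt_step_sketch(float32 a, float32 b, float_status *st)
    {
        return float32_muladd_scalbn(a, b, float32_three, -1, 0, st);
    }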
New patch
1
1
Use the scalbn interface instead of float_muladd_halve_result.
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/sparc/helper.h | 4 +-
7
target/sparc/fop_helper.c | 8 ++--
8
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
9
3 files changed, 54 insertions(+), 38 deletions(-)
10
11
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/sparc/helper.h
14
+++ b/target/sparc/helper.h
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
16
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
17
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
18
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
19
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
20
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
21
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
22
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
23
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
25
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
26
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
27
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
28
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
29
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
30
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
31
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
32
33
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/sparc/fop_helper.c
36
+++ b/target/sparc/fop_helper.c
37
@@ -XXX,XX +XXX,XX @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
38
}
39
40
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
41
- float32 s2, float32 s3, uint32_t op)
42
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
43
{
44
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
45
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
46
check_ieee_exceptions(env, GETPC());
47
return ret;
48
}
49
50
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
51
- float64 s2, float64 s3, uint32_t op)
52
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
53
{
54
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
55
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
56
check_ieee_exceptions(env, GETPC());
57
return ret;
58
}
59
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/sparc/translate.c
62
+++ b/target/sparc/translate.c
63
@@ -XXX,XX +XXX,XX @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
64
65
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
66
{
67
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
68
+ TCGv_i32 z = tcg_constant_i32(0);
69
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
70
}
71
72
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
73
{
74
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
75
+ TCGv_i32 z = tcg_constant_i32(0);
76
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
77
}
78
79
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
80
{
81
- int op = float_muladd_negate_c;
82
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
83
+ TCGv_i32 z = tcg_constant_i32(0);
84
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
85
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
86
}
87
88
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
89
{
90
- int op = float_muladd_negate_c;
91
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
92
+ TCGv_i32 z = tcg_constant_i32(0);
93
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
94
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
95
}
96
97
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
98
{
99
- int op = float_muladd_negate_c | float_muladd_negate_result;
100
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
101
+ TCGv_i32 z = tcg_constant_i32(0);
102
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
103
+ float_muladd_negate_result);
104
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
105
}
106
107
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
108
{
109
- int op = float_muladd_negate_c | float_muladd_negate_result;
110
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
111
+ TCGv_i32 z = tcg_constant_i32(0);
112
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
113
+ float_muladd_negate_result);
114
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
115
}
116
117
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
118
{
119
- int op = float_muladd_negate_result;
120
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
121
+ TCGv_i32 z = tcg_constant_i32(0);
122
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
123
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
124
}
125
126
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
127
{
128
- int op = float_muladd_negate_result;
129
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
130
+ TCGv_i32 z = tcg_constant_i32(0);
131
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
132
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
133
}
134
135
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
136
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
137
{
138
- TCGv_i32 one = tcg_constant_i32(float32_one);
139
- int op = float_muladd_halve_result;
140
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
141
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
142
+ TCGv_i32 mone = tcg_constant_i32(-1);
143
+ TCGv_i32 op = tcg_constant_i32(0);
144
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
145
}
146
147
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
148
{
149
- TCGv_i64 one = tcg_constant_i64(float64_one);
150
- int op = float_muladd_halve_result;
151
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
152
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
153
+ TCGv_i32 mone = tcg_constant_i32(-1);
154
+ TCGv_i32 op = tcg_constant_i32(0);
155
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
156
}
157
158
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
159
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
160
{
161
- TCGv_i32 one = tcg_constant_i32(float32_one);
162
- int op = float_muladd_negate_c | float_muladd_halve_result;
163
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
164
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
165
+ TCGv_i32 mone = tcg_constant_i32(-1);
166
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
167
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
168
}
169
170
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
171
{
172
- TCGv_i64 one = tcg_constant_i64(float64_one);
173
- int op = float_muladd_negate_c | float_muladd_halve_result;
174
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
175
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
176
+ TCGv_i32 mone = tcg_constant_i32(-1);
177
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
178
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
179
}
180
181
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
182
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
183
{
184
- TCGv_i32 one = tcg_constant_i32(float32_one);
185
- int op = float_muladd_negate_result | float_muladd_halve_result;
186
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
187
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
188
+ TCGv_i32 mone = tcg_constant_i32(-1);
189
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
190
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
191
}
192
193
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
194
{
195
- TCGv_i64 one = tcg_constant_i64(float64_one);
196
- int op = float_muladd_negate_result | float_muladd_halve_result;
197
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
198
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
199
+ TCGv_i32 mone = tcg_constant_i32(-1);
200
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
201
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
202
}
203
204
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
205
--
206
2.43.0
207
208
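A short sketch of what the translator now emits for fhadds (the names d, s1 and s2 stand for the TCGv_i32 values of the surrounding function; the calls themselves are taken from the diff above): the halving is expressed by handing the constant scale -1 to the helper rather than a flag.

    /* Sketch: d = (1.0 * s1 + s2) * 2^-1, i.e. (s1 + s2) / 2, one rounding. */
    TCGv_i32 fone = tcg_constant_i32(float32_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 zero = tcg_constant_i32(0);
    gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, zero);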
1
When BH is constant, it is constrained to 11 bits for use in MOVCC.
1
All uses have been converted to float*_muladd_scalbn.
2
For the cases in which we must load the constant BH into a register,
3
we do not need the full logic of tcg_out_movi; we can use the simpler
4
function for emitting a 13 bit constant.
5
2
6
This eliminates the only case in which TCG_REG_T2 was passed to
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
tcg_out_movi, which will shortly become invalid.
8
9
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
5
---
12
tcg/sparc/tcg-target.c.inc | 10 +++++++---
6
include/fpu/softfloat.h | 3 ---
13
1 file changed, 7 insertions(+), 3 deletions(-)
7
fpu/softfloat.c | 6 ------
8
fpu/softfloat-parts.c.inc | 4 ----
9
3 files changed, 13 deletions(-)
14
10
15
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
16
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
17
--- a/tcg/sparc/tcg-target.c.inc
13
--- a/include/fpu/softfloat.h
18
+++ b/tcg/sparc/tcg-target.c.inc
14
+++ b/include/fpu/softfloat.h
19
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
20
if (use_vis3_instructions && !is_sub) {
16
| Using these differs from negating an input or output before calling
21
/* Note that ADDXC doesn't accept immediates. */
17
| the muladd function in that this means that a NaN doesn't have its
22
if (bhconst && bh != 0) {
18
| sign bit inverted before it is propagated.
23
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh);
19
-| We also support halving the result before rounding, as a special
24
+ tcg_out_movi_imm13(s, TCG_REG_T2, bh);
20
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
25
bh = TCG_REG_T2;
21
*----------------------------------------------------------------------------*/
26
}
22
enum {
27
tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
23
float_muladd_negate_c = 1,
28
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
24
float_muladd_negate_product = 2,
29
     tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
25
float_muladd_negate_result = 4,
30
    }
26
- float_muladd_halve_result = 8,
31
} else {
27
};
32
- /* Otherwise adjust BH as if there is carry into T2 ... */
28
33
+ /*
29
/*----------------------------------------------------------------------------
34
+ * Otherwise adjust BH as if there is carry into T2.
30
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
35
+ * Note that constant BH is constrained to 11 bits for the MOVCC,
31
index XXXXXXX..XXXXXXX 100644
36
+ * so the adjustment fits 12 bits.
32
--- a/fpu/softfloat.c
37
+ */
33
+++ b/fpu/softfloat.c
38
if (bhconst) {
34
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
39
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1));
35
if (unlikely(!can_use_fpu(s))) {
40
+ tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
36
goto soft;
41
} else {
37
}
42
tcg_out_arithi(s, TCG_REG_T2, bh, 1,
38
- if (unlikely(flags & float_muladd_halve_result)) {
43
is_sub ? ARITH_SUB : ARITH_ADD);
39
- goto soft;
40
- }
41
42
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
43
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
44
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
45
if (unlikely(!can_use_fpu(s))) {
46
goto soft;
47
}
48
- if (unlikely(flags & float_muladd_halve_result)) {
49
- goto soft;
50
- }
51
52
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
53
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
54
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
55
index XXXXXXX..XXXXXXX 100644
56
--- a/fpu/softfloat-parts.c.inc
57
+++ b/fpu/softfloat-parts.c.inc
58
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
59
a->exp = p_widen.exp;
60
61
return_normal:
62
- /* TODO: Replace all use of float_muladd_halve_result with scale. */
63
- if (flags & float_muladd_halve_result) {
64
- a->exp -= 1;
65
- }
66
a->exp += scale;
67
finish_sign:
68
if (flags & float_muladd_negate_result) {
44
--
69
--
45
2.25.1
70
2.43.0
46
71
47
72
New patch
1
This rounding mode is used by Hexagon.
1
2
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
include/fpu/softfloat-types.h | 2 ++
6
fpu/softfloat-parts.c.inc | 3 +++
7
2 files changed, 5 insertions(+)
8
9
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
10
index XXXXXXX..XXXXXXX 100644
11
--- a/include/fpu/softfloat-types.h
12
+++ b/include/fpu/softfloat-types.h
13
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
14
float_round_to_odd = 5,
15
/* Not an IEEE rounding mode: round to closest odd, overflow to inf */
16
float_round_to_odd_inf = 6,
17
+ /* Not an IEEE rounding mode: round to nearest even, overflow to max */
18
+ float_round_nearest_even_max = 7,
19
} FloatRoundMode;
20
21
/*
22
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
23
index XXXXXXX..XXXXXXX 100644
24
--- a/fpu/softfloat-parts.c.inc
25
+++ b/fpu/softfloat-parts.c.inc
26
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
27
int exp, flags = 0;
28
29
switch (s->float_rounding_mode) {
30
+ case float_round_nearest_even_max:
31
+ overflow_norm = true;
32
+ /* fall through */
33
case float_round_nearest_even:
34
if (N > 64 && frac_lsb == 0) {
35
inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
36
--
37
2.43.0
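To make the new mode concrete (an illustrative sketch with made-up values, not from the patch): rounding behaves like round-to-nearest-even, but an overflowing result saturates to the largest finite value of the format instead of becoming an infinity. 'big' and 'st' below are hypothetical: any finite operands whose product overflows, and an initialised float_status.

    /* Sketch: with float_round_nearest_even_max an overflow does not
     * produce +inf but the maximum finite float32. */
    set_float_rounding_mode(float_round_nearest_even_max, &st);
    float32 r = float32_mul(big, big, &st);
    /* r is the largest finite float32, not float32_infinity. */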
New patch
1
Certain Hexagon instructions suppress changes to the result
2
when the product of fma() is a true zero.
1
3
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/fpu/softfloat.h | 5 +++++
7
fpu/softfloat.c | 3 +++
8
fpu/softfloat-parts.c.inc | 4 +++-
9
3 files changed, 11 insertions(+), 1 deletion(-)
10
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/include/fpu/softfloat.h
14
+++ b/include/fpu/softfloat.h
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
16
| Using these differs from negating an input or output before calling
17
| the muladd function in that this means that a NaN doesn't have its
18
| sign bit inverted before it is propagated.
19
+|
20
+| With float_muladd_suppress_add_product_zero, if A or B is zero
21
+| such that the product is a true zero, then return C without addition.
22
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
23
*----------------------------------------------------------------------------*/
24
enum {
25
float_muladd_negate_c = 1,
26
float_muladd_negate_product = 2,
27
float_muladd_negate_result = 4,
28
+ float_muladd_suppress_add_product_zero = 8,
29
};
30
31
/*----------------------------------------------------------------------------
32
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/fpu/softfloat.c
35
+++ b/fpu/softfloat.c
36
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
37
if (unlikely(!can_use_fpu(s))) {
38
goto soft;
39
}
40
+ if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
41
+ goto soft;
42
+ }
43
44
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
45
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
46
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
47
index XXXXXXX..XXXXXXX 100644
48
--- a/fpu/softfloat-parts.c.inc
49
+++ b/fpu/softfloat-parts.c.inc
50
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
51
goto return_normal;
52
}
53
if (c->cls == float_class_zero) {
54
- if (a->sign != c->sign) {
55
+ if (flags & float_muladd_suppress_add_product_zero) {
56
+ a->sign = c->sign;
57
+ } else if (a->sign != c->sign) {
58
goto return_sub_zero;
59
}
60
goto return_zero;
61
--
62
2.43.0
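A small illustration of the comment above (made-up values, not part of the patch): when the product is a true zero, the addend keeps its own sign, so a case that would normally round to +0 instead returns the -0 passed in as c. 'st' is an assumed, initialised float_status; float32_set_sign and float32_three are existing softfloat helpers.

    /* Sketch: product is +0 * 3.0 (a true zero), c is -0. With the new
     * flag the result is c itself (-0); without it, +0 + -0 rounds to +0. */
    float32 neg_zero = float32_set_sign(float32_zero, 1);
    float32 r = float32_muladd(float32_zero, float32_three, neg_zero,
                               float_muladd_suppress_add_product_zero, &st);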
New patch
1
There are no special cases for this instruction.
2
Remove internal_mpyf as unused.
1
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/hexagon/fma_emu.h | 1 -
8
target/hexagon/fma_emu.c | 8 --------
9
target/hexagon/op_helper.c | 2 +-
10
3 files changed, 1 insertion(+), 10 deletions(-)
11
12
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/hexagon/fma_emu.h
15
+++ b/target/hexagon/fma_emu.h
16
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32);
17
float32 infinite_float32(uint8_t sign);
18
float32 internal_fmafx(float32 a, float32 b, float32 c,
19
int scale, float_status *fp_status);
20
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
21
float64 internal_mpyhh(float64 a, float64 b,
22
unsigned long long int accumulated,
23
float_status *fp_status);
24
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/hexagon/fma_emu.c
27
+++ b/target/hexagon/fma_emu.c
28
@@ -XXX,XX +XXX,XX @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
29
return accum_round_float32(result, fp_status);
30
}
31
32
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
33
-{
34
- if (float32_is_zero(a) || float32_is_zero(b)) {
35
- return float32_mul(a, b, fp_status);
36
- }
37
- return internal_fmafx(a, b, float32_zero, 0, fp_status);
38
-}
39
-
40
float64 internal_mpyhh(float64 a, float64 b,
41
unsigned long long int accumulated,
42
float_status *fp_status)
43
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/hexagon/op_helper.c
46
+++ b/target/hexagon/op_helper.c
47
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
48
{
49
float32 RdV;
50
arch_fpop_start(env);
51
- RdV = internal_mpyf(RsV, RtV, &env->fp_status);
52
+ RdV = float32_mul(RsV, RtV, &env->fp_status);
53
arch_fpop_end(env);
54
return RdV;
55
}
56
--
57
2.43.0
New patch
1
There are no special cases for this instruction.
1
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/hexagon/op_helper.c | 2 +-
7
1 file changed, 1 insertion(+), 1 deletion(-)
8
9
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/hexagon/op_helper.c
12
+++ b/target/hexagon/op_helper.c
13
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
14
float32 RsV, float32 RtV)
15
{
16
arch_fpop_start(env);
17
- RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
18
+ RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
19
arch_fpop_end(env);
20
return RxV;
21
}
22
--
23
2.43.0
New patch
1
There are no special cases for this instruction. Since Hexagon
2
always uses default-nan mode, explicitly negating the first
3
input is unnecessary. Use float_muladd_negate_product instead.
1
4
5
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/hexagon/op_helper.c | 5 ++---
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
11
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/hexagon/op_helper.c
14
+++ b/target/hexagon/op_helper.c
15
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
16
float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
17
float32 RsV, float32 RtV)
18
{
19
- float32 neg_RsV;
20
arch_fpop_start(env);
21
- neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
22
- RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
23
+ RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
24
+ &env->fp_status);
25
arch_fpop_end(env);
26
return RxV;
27
}
28
--
29
2.43.0
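For clarity (a sketch, not part of the patch): float_muladd_negate_product negates a*b inside the fused operation, so the helper computes RxV - RsV * RtV with one rounding; negating RsV beforehand could only differ in the sign attached to a NaN result, which Hexagon's default-NaN mode discards anyway. The function name below is hypothetical; 'st' is an assumed float_status.

    /* Sketch of the equivalence relied on above. */
    static float32 fms_sketch(float32 rs, float32 rt, float32 rx,
                              float_status *st)
    {
        /* rx - rs * rt, fused into a single rounding. */
        return float32_muladd(rs, rt, rx, float_muladd_negate_product, st);
    }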
1
We will shortly allow the use of unaligned memory accesses,
1
This instruction has a special case that 0 * x + c returns c
2
and these require proper alignment. Use get_alignment_bits
2
without the normal sign folding that comes with 0 + -0.
3
to verify and remove USING_SOFTMMU.
3
Use the new float_muladd_suppress_add_product_zero to
4
describe this.
4
5
5
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
---
8
tcg/arm/tcg-target.c.inc | 23 ++++++++---------------
9
target/hexagon/op_helper.c | 11 +++--------
9
1 file changed, 8 insertions(+), 15 deletions(-)
10
1 file changed, 3 insertions(+), 8 deletions(-)
10
11
11
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
12
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/arm/tcg-target.c.inc
14
--- a/target/hexagon/op_helper.c
14
+++ b/tcg/arm/tcg-target.c.inc
15
+++ b/target/hexagon/op_helper.c
15
@@ -XXX,XX +XXX,XX @@ bool use_idiv_instructions;
16
@@ -XXX,XX +XXX,XX @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
16
bool use_neon_instructions;
17
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
17
#endif
18
float32 RsV, float32 RtV, float32 PuV)
18
19
{
19
-/* ??? Ought to think about changing CONFIG_SOFTMMU to always defined. */
20
- size4s_t tmp;
20
-#ifdef CONFIG_SOFTMMU
21
arch_fpop_start(env);
21
-# define USING_SOFTMMU 1
22
- RxV = check_nan(RxV, RxV, &env->fp_status);
22
-#else
23
- RxV = check_nan(RxV, RsV, &env->fp_status);
23
-# define USING_SOFTMMU 0
24
- RxV = check_nan(RxV, RtV, &env->fp_status);
24
-#endif
25
- tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
25
-
26
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
26
#ifdef CONFIG_DEBUG_TCG
27
- RxV = tmp;
27
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
28
- }
28
"%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
29
+ RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
29
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
30
+ float_muladd_suppress_add_product_zero,
30
tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
31
+ &env->fp_status);
31
break;
32
arch_fpop_end(env);
32
case MO_UQ:
33
return RxV;
33
- /* Avoid ldrd for user-only emulation, to handle unaligned. */
34
}
34
- if (USING_SOFTMMU
35
+ /* LDRD requires alignment; double-check that. */
36
+ if (get_alignment_bits(opc) >= MO_64
37
&& (datalo & 1) == 0 && datahi == datalo + 1) {
38
tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
39
} else if (datalo != addend) {
40
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
41
tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
42
break;
43
case MO_UQ:
44
- /* Avoid ldrd for user-only emulation, to handle unaligned. */
45
- if (USING_SOFTMMU
46
+ /* LDRD requires alignment; double-check that. */
47
+ if (get_alignment_bits(opc) >= MO_64
48
&& (datalo & 1) == 0 && datahi == datalo + 1) {
49
tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
50
} else if (datalo == addrlo) {
51
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
52
tcg_out_st32_r(s, cond, datalo, addrlo, addend);
53
break;
54
case MO_64:
55
- /* Avoid strd for user-only emulation, to handle unaligned. */
56
- if (USING_SOFTMMU
57
+ /* STRD requires alignment; double-check that. */
58
+ if (get_alignment_bits(opc) >= MO_64
59
&& (datalo & 1) == 0 && datahi == datalo + 1) {
60
tcg_out_strd_r(s, cond, datalo, addrlo, addend);
61
} else {
62
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
63
tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
64
break;
65
case MO_64:
66
- /* Avoid strd for user-only emulation, to handle unaligned. */
67
- if (USING_SOFTMMU
68
+ /* STRD requires alignment; double-check that. */
69
+ if (get_alignment_bits(opc) >= MO_64
70
&& (datalo & 1) == 0 && datahi == datalo + 1) {
71
tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
72
} else {
73
--
35
--
74
2.25.1
36
2.43.0
75
76
1
This is kinda sorta the opposite of the other tcg hosts, where
1
There are multiple special cases for this instruction.
2
we get (normal) alignment checks for free with host SIGBUS and
2
(1) The saturate to normal maximum instead of overflow to infinity is
3
need to add code to support unaligned accesses.
3
handled by the new float_round_nearest_even_max rounding mode.
4
(2) The 0 * n + c special case is handled by the new
5
float_muladd_suppress_add_product_zero flag.
6
(3) The Inf - Inf -> 0 special case can be detected after the fact
7
by examining float_flag_invalid_isi.
4
8
5
This inline code expansion is somewhat large, but it takes quite
9
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
a few instructions to make a function call to a helper anyway.
7
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
---
11
tcg/sparc/tcg-target.c.inc | 219 +++++++++++++++++++++++++++++++++++--
12
target/hexagon/op_helper.c | 105 +++++++++----------------------------
12
1 file changed, 211 insertions(+), 8 deletions(-)
13
1 file changed, 26 insertions(+), 79 deletions(-)
13
14
14
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
15
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
15
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/sparc/tcg-target.c.inc
17
--- a/target/hexagon/op_helper.c
17
+++ b/tcg/sparc/tcg-target.c.inc
18
+++ b/target/hexagon/op_helper.c
18
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_call_oarg_regs[] = {
19
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
19
#define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
20
return RxV;
20
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
21
#define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
22
+#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
23
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
24
#define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
25
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
26
@@ -XXX,XX +XXX,XX @@ static void build_trampolines(TCGContext *s)
27
tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
28
}
29
}
21
}
30
+#else
22
31
+static const tcg_insn_unit *qemu_unalign_ld_trampoline;
23
-static bool is_zero_prod(float32 a, float32 b)
32
+static const tcg_insn_unit *qemu_unalign_st_trampoline;
24
-{
25
- return ((float32_is_zero(a) && is_finite(b)) ||
26
- (float32_is_zero(b) && is_finite(a)));
27
-}
28
-
29
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
30
-{
31
- float32 ret = dst;
32
- if (float32_is_any_nan(x)) {
33
- if (extract32(x, 22, 1) == 0) {
34
- float_raise(float_flag_invalid, fp_status);
35
- }
36
- ret = make_float32(0xffffffff); /* nan */
37
- }
38
- return ret;
39
-}
40
-
41
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
42
float32 RsV, float32 RtV, float32 PuV)
43
{
44
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
45
return RxV;
46
}
47
48
-static bool is_inf_prod(int32_t a, int32_t b)
49
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
50
+ float32 RsV, float32 RtV, int negate)
51
{
52
- return (float32_is_infinity(a) && float32_is_infinity(b)) ||
53
- (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
54
- (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
55
+ int flags;
33
+
56
+
34
+static void build_trampolines(TCGContext *s)
57
+ arch_fpop_start(env);
35
+{
36
+ for (int ld = 0; ld < 2; ++ld) {
37
+ void *helper;
38
+
58
+
39
+ while ((uintptr_t)s->code_ptr & 15) {
59
+ set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
40
+ tcg_out_nop(s);
60
+ RxV = float32_muladd(RsV, RtV, RxV,
41
+ }
61
+ negate | float_muladd_suppress_add_product_zero,
62
+ &env->fp_status);
42
+
63
+
43
+ if (ld) {
64
+ flags = get_float_exception_flags(&env->fp_status);
44
+ helper = helper_unaligned_ld;
65
+ if (flags) {
45
+ qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
66
+ /* Flags are suppressed by this instruction. */
46
+ } else {
67
+ set_float_exception_flags(0, &env->fp_status);
47
+ helper = helper_unaligned_st;
48
+ qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
49
+ }
50
+
68
+
51
+ if (!SPARC64 && TARGET_LONG_BITS == 64) {
69
+ /* Return 0 for Inf - Inf. */
52
+ /* Install the high part of the address. */
70
+ if (flags & float_flag_invalid_isi) {
53
+ tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
71
+ RxV = 0;
54
+ }
55
+
56
+ /* Tail call. */
57
+ tcg_out_jmpl_const(s, helper, true, true);
58
+ /* delay slot -- set the env argument */
59
+ tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
60
+ }
61
+}
62
#endif
63
64
/* Generate global QEMU prologue and epilogue code */
65
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
66
/* delay slot */
67
tcg_out_movi_imm13(s, TCG_REG_O0, 0);
68
69
-#ifdef CONFIG_SOFTMMU
70
build_trampolines(s);
71
-#endif
72
}
73
74
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
75
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
76
static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
77
[MO_UB] = LDUB,
78
[MO_SB] = LDSB,
79
+ [MO_UB | MO_LE] = LDUB,
80
+ [MO_SB | MO_LE] = LDSB,
81
82
[MO_BEUW] = LDUH,
83
[MO_BESW] = LDSH,
84
[MO_BEUL] = LDUW,
85
[MO_BESL] = LDSW,
86
[MO_BEUQ] = LDX,
87
+ [MO_BESQ] = LDX,
88
89
[MO_LEUW] = LDUH_LE,
90
[MO_LESW] = LDSH_LE,
91
[MO_LEUL] = LDUW_LE,
92
[MO_LESL] = LDSW_LE,
93
[MO_LEUQ] = LDX_LE,
94
+ [MO_LESQ] = LDX_LE,
95
};
96
97
static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
98
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
99
MemOpIdx oi, bool is_64)
100
{
101
MemOp memop = get_memop(oi);
102
+ tcg_insn_unit *label_ptr;
103
+
104
#ifdef CONFIG_SOFTMMU
105
unsigned memi = get_mmuidx(oi);
106
TCGReg addrz, param;
107
const tcg_insn_unit *func;
108
- tcg_insn_unit *label_ptr;
109
110
addrz = tcg_out_tlb_load(s, addr, memi, memop,
111
offsetof(CPUTLBEntry, addr_read));
112
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
113
114
*label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
115
#else
116
+ TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
117
+ unsigned a_bits = get_alignment_bits(memop);
118
+ unsigned s_bits = memop & MO_SIZE;
119
+ unsigned t_bits;
120
+
121
if (SPARC64 && TARGET_LONG_BITS == 32) {
122
tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
123
addr = TCG_REG_T1;
124
}
125
- tcg_out_ldst_rr(s, data, addr,
126
- (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
127
+
128
+ /*
129
+ * Normal case: alignment equal to access size.
130
+ */
131
+ if (a_bits == s_bits) {
132
+ tcg_out_ldst_rr(s, data, addr, index,
133
+ qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
134
+ return;
135
+ }
136
+
137
+ /*
138
+ * Test for at least natural alignment, and assume most accesses
139
+ * will be aligned -- perform a straight load in the delay slot.
140
+ * This is required to preserve atomicity for aligned accesses.
141
+ */
142
+ t_bits = MAX(a_bits, s_bits);
143
+ tcg_debug_assert(t_bits < 13);
144
+ tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);
145
+
146
+ /* beq,a,pt %icc, label */
147
+ label_ptr = s->code_ptr;
148
+ tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
149
+ /* delay slot */
150
+ tcg_out_ldst_rr(s, data, addr, index,
151
qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
152
+
153
+ if (a_bits >= s_bits) {
154
+ /*
155
+ * Overalignment: A successful alignment test will perform the memory
156
+ * operation in the delay slot, and failure need only invoke the
157
+ * handler for SIGBUS.
158
+ */
159
+ TCGReg arg_low = TCG_REG_O1 + (!SPARC64 && TARGET_LONG_BITS == 64);
160
+ tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
161
+ /* delay slot -- move to low part of argument reg */
162
+ tcg_out_mov_delay(s, arg_low, addr);
163
+ } else {
164
+ /* Underalignment: load by pieces of minimum alignment. */
165
+ int ld_opc, a_size, s_size, i;
166
+
167
+ /*
168
+ * Force full address into T1 early; avoids problems with
169
+ * overlap between @addr and @data.
170
+ */
171
+ tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);
172
+
173
+ a_size = 1 << a_bits;
174
+ s_size = 1 << s_bits;
175
+ if ((memop & MO_BSWAP) == MO_BE) {
176
+ ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
177
+ tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
178
+ ld_opc = qemu_ld_opc[a_bits | MO_BE];
179
+ for (i = a_size; i < s_size; i += a_size) {
180
+ tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
181
+ tcg_out_arithi(s, data, data, a_size, SHIFT_SLLX);
182
+ tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
183
+ }
184
+ } else if (a_bits == 0) {
185
+ ld_opc = LDUB;
186
+ tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
187
+ for (i = a_size; i < s_size; i += a_size) {
188
+ if ((memop & MO_SIGN) && i == s_size - a_size) {
189
+ ld_opc = LDSB;
190
+ }
191
+ tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
192
+ tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
193
+ tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
194
+ }
195
+ } else {
196
+ ld_opc = qemu_ld_opc[a_bits | MO_LE];
197
+ tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
198
+ for (i = a_size; i < s_size; i += a_size) {
199
+ tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
200
+ if ((memop & MO_SIGN) && i == s_size - a_size) {
201
+ ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
202
+ }
203
+ tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
204
+ tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
205
+ tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
206
+ }
207
+ }
72
+ }
208
+ }
73
+ }
209
+
74
+
210
+ *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
75
+ arch_fpop_end(env);
211
#endif /* CONFIG_SOFTMMU */
76
+ return RxV;
212
}
77
}
213
78
214
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
79
float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
215
MemOpIdx oi)
80
float32 RsV, float32 RtV)
216
{
81
{
217
MemOp memop = get_memop(oi);
82
- bool infinp;
218
+ tcg_insn_unit *label_ptr;
83
- bool infminusinf;
219
+
84
- float32 tmp;
220
#ifdef CONFIG_SOFTMMU
85
-
221
unsigned memi = get_mmuidx(oi);
86
- arch_fpop_start(env);
222
TCGReg addrz, param;
87
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
223
const tcg_insn_unit *func;
88
- infminusinf = float32_is_infinity(RxV) &&
224
- tcg_insn_unit *label_ptr;
89
- is_inf_prod(RsV, RtV) &&
225
90
- (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
226
addrz = tcg_out_tlb_load(s, addr, memi, memop,
91
- infinp = float32_is_infinity(RxV) ||
227
offsetof(CPUTLBEntry, addr_write));
92
- float32_is_infinity(RtV) ||
228
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
93
- float32_is_infinity(RsV);
229
94
- RxV = check_nan(RxV, RxV, &env->fp_status);
230
*label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
95
- RxV = check_nan(RxV, RsV, &env->fp_status);
231
#else
96
- RxV = check_nan(RxV, RtV, &env->fp_status);
232
+ TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
97
- tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
233
+ unsigned a_bits = get_alignment_bits(memop);
98
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
234
+ unsigned s_bits = memop & MO_SIZE;
99
- RxV = tmp;
235
+ unsigned t_bits;
100
- }
236
+
101
- set_float_exception_flags(0, &env->fp_status);
237
if (SPARC64 && TARGET_LONG_BITS == 32) {
102
- if (float32_is_infinity(RxV) && !infinp) {
238
tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
103
- RxV = RxV - 1;
239
addr = TCG_REG_T1;
104
- }
240
}
105
- if (infminusinf) {
241
- tcg_out_ldst_rr(s, data, addr,
106
- RxV = 0;
242
- (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
107
- }
243
+
108
- arch_fpop_end(env);
244
+ /*
109
- return RxV;
245
+ * Normal case: alignment equal to access size.
110
+ return do_sffma_lib(env, RxV, RsV, RtV, 0);
246
+ */
247
+ if (a_bits == s_bits) {
248
+ tcg_out_ldst_rr(s, data, addr, index,
249
+ qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
250
+ return;
251
+ }
252
+
253
+ /*
254
+ * Test for at least natural alignment, and assume most accesses
255
+ * will be aligned -- perform a straight store in the delay slot.
256
+ * This is required to preserve atomicity for aligned accesses.
257
+ */
258
+ t_bits = MAX(a_bits, s_bits);
259
+ tcg_debug_assert(t_bits < 13);
260
+ tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);
261
+
262
+ /* beq,a,pt %icc, label */
263
+ label_ptr = s->code_ptr;
264
+ tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
265
+ /* delay slot */
266
+ tcg_out_ldst_rr(s, data, addr, index,
267
qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
268
+
269
+ if (a_bits >= s_bits) {
270
+ /*
271
+ * Overalignment: A successful alignment test will perform the memory
272
+ * operation in the delay slot, and failure need only invoke the
273
+ * handler for SIGBUS.
274
+ */
275
+ TCGReg arg_low = TCG_REG_O1 + (!SPARC64 && TARGET_LONG_BITS == 64);
276
+ tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
277
+ /* delay slot -- move to low part of argument reg */
278
+ tcg_out_mov_delay(s, arg_low, addr);
279
+ } else {
280
+ /* Underalignment: store by pieces of minimum alignment. */
281
+ int st_opc, a_size, s_size, i;
282
+
283
+ /*
284
+ * Force full address into T1 early; avoids problems with
285
+ * overlap between @addr and @data.
286
+ */
287
+ tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);
288
+
289
+ a_size = 1 << a_bits;
290
+ s_size = 1 << s_bits;
291
+ if ((memop & MO_BSWAP) == MO_BE) {
292
+ st_opc = qemu_st_opc[a_bits | MO_BE];
293
+ for (i = 0; i < s_size; i += a_size) {
294
+ TCGReg d = data;
295
+ int shift = (s_size - a_size - i) * 8;
296
+ if (shift) {
297
+ d = TCG_REG_T2;
298
+ tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
299
+ }
300
+ tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
301
+ }
302
+ } else if (a_bits == 0) {
303
+ tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
304
+ for (i = 1; i < s_size; i++) {
305
+ tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
306
+ tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
307
+ }
308
+ } else {
309
+ /* Note that ST*A with immediate asi must use indexed address. */
310
+ st_opc = qemu_st_opc[a_bits + MO_LE];
311
+ tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
312
+ for (i = a_size; i < s_size; i += a_size) {
313
+ tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
314
+ tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
315
+ tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
316
+ }
317
+ }
318
+ }
319
+
320
+ *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
321
#endif /* CONFIG_SOFTMMU */
322
}
111
}
323
112
113
float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
114
float32 RsV, float32 RtV)
115
{
116
- bool infinp;
117
- bool infminusinf;
118
- float32 tmp;
119
-
120
- arch_fpop_start(env);
121
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
122
- infminusinf = float32_is_infinity(RxV) &&
123
- is_inf_prod(RsV, RtV) &&
124
- (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
125
- infinp = float32_is_infinity(RxV) ||
126
- float32_is_infinity(RtV) ||
127
- float32_is_infinity(RsV);
128
- RxV = check_nan(RxV, RxV, &env->fp_status);
129
- RxV = check_nan(RxV, RsV, &env->fp_status);
130
- RxV = check_nan(RxV, RtV, &env->fp_status);
131
- float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
132
- tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
133
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
134
- RxV = tmp;
135
- }
136
- set_float_exception_flags(0, &env->fp_status);
137
- if (float32_is_infinity(RxV) && !infinp) {
138
- RxV = RxV - 1;
139
- }
140
- if (infminusinf) {
141
- RxV = 0;
142
- }
143
- arch_fpop_end(env);
144
- return RxV;
145
+ return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
146
}
147
148
float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
324
--
149
--
325
2.25.1
150
2.43.0
326
327
1
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
The function is now unused.
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
tcg/tci.c | 20 ++++++++++++++------
6
target/hexagon/fma_emu.h | 2 -
5
1 file changed, 14 insertions(+), 6 deletions(-)
7
target/hexagon/fma_emu.c | 171 ---------------------------------------
8
2 files changed, 173 deletions(-)
6
9
7
diff --git a/tcg/tci.c b/tcg/tci.c
10
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/tci.c
12
--- a/target/hexagon/fma_emu.h
10
+++ b/tcg/tci.c
13
+++ b/target/hexagon/fma_emu.h
11
@@ -XXX,XX +XXX,XX @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
14
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float32_getexp_raw(float32 f32)
12
static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
15
}
13
MemOpIdx oi, const void *tb_ptr)
16
int32_t float32_getexp(float32 f32);
17
float32 infinite_float32(uint8_t sign);
18
-float32 internal_fmafx(float32 a, float32 b, float32 c,
19
- int scale, float_status *fp_status);
20
float64 internal_mpyhh(float64 a, float64 b,
21
unsigned long long int accumulated,
22
float_status *fp_status);
23
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
24
index XXXXXXX..XXXXXXX 100644
25
--- a/target/hexagon/fma_emu.c
26
+++ b/target/hexagon/fma_emu.c
27
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
28
return -1;
29
}
30
31
-static uint64_t float32_getmant(float32 f32)
32
-{
33
- Float a = { .i = f32 };
34
- if (float32_is_normal(f32)) {
35
- return a.mant | 1ULL << 23;
36
- }
37
- if (float32_is_zero(f32)) {
38
- return 0;
39
- }
40
- if (float32_is_denormal(f32)) {
41
- return a.mant;
42
- }
43
- return ~0ULL;
44
-}
45
-
46
int32_t float32_getexp(float32 f32)
14
{
47
{
15
- MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
48
Float a = { .i = f32 };
16
+ MemOp mop = get_memop(oi);
49
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
17
uintptr_t ra = (uintptr_t)tb_ptr;
50
}
18
51
19
#ifdef CONFIG_SOFTMMU
52
/* Return a maximum finite value with the requested sign */
20
- switch (mop) {
53
-static float32 maxfinite_float32(uint8_t sign)
21
+ switch (mop & (MO_BSWAP | MO_SSIZE)) {
54
-{
22
case MO_UB:
55
- if (sign) {
23
return helper_ret_ldub_mmu(env, taddr, oi, ra);
56
- return make_float32(SF_MINUS_MAXF);
24
case MO_SB:
57
- } else {
25
@@ -XXX,XX +XXX,XX @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
58
- return make_float32(SF_MAXF);
26
}
59
- }
27
#else
60
-}
28
void *haddr = g2h(env_cpu(env), taddr);
61
-
29
+ unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;
62
-/* Return a zero value with requested sign */
30
uint64_t ret;
63
-static float32 zero_float32(uint8_t sign)
31
64
-{
32
set_helper_retaddr(ra);
65
- if (sign) {
33
- switch (mop) {
66
- return make_float32(0x80000000);
34
+ if (taddr & a_mask) {
67
- } else {
35
+ helper_unaligned_ld(env, taddr);
68
- return float32_zero;
36
+ }
69
- }
37
+ switch (mop & (MO_BSWAP | MO_SSIZE)) {
70
-}
38
case MO_UB:
71
-
39
ret = ldub_p(haddr);
72
#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
40
break;
73
static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
41
@@ -XXX,XX +XXX,XX @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
74
{ \
42
static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
75
@@ -XXX,XX +XXX,XX @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
43
MemOpIdx oi, const void *tb_ptr)
76
}
44
{
77
45
- MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE);
78
GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
46
+ MemOp mop = get_memop(oi);
79
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
47
uintptr_t ra = (uintptr_t)tb_ptr;
80
-
48
81
-static bool is_inf_prod(float64 a, float64 b)
49
#ifdef CONFIG_SOFTMMU
82
-{
50
- switch (mop) {
83
- return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
51
+ switch (mop & (MO_BSWAP | MO_SIZE)) {
84
- (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
52
case MO_UB:
85
- (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
53
helper_ret_stb_mmu(env, taddr, val, oi, ra);
86
-}
54
break;
87
-
55
@@ -XXX,XX +XXX,XX @@ static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
88
-static float64 special_fma(float64 a, float64 b, float64 c,
56
}
89
- float_status *fp_status)
57
#else
90
-{
58
void *haddr = g2h(env_cpu(env), taddr);
91
- float64 ret = make_float64(0);
59
+ unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;
92
-
60
93
- /*
61
set_helper_retaddr(ra);
94
- * If A multiplied by B is an exact infinity and C is also an infinity
62
- switch (mop) {
95
- * but with the opposite sign, FMA returns NaN and raises invalid.
63
+ if (taddr & a_mask) {
96
- */
64
+ helper_unaligned_st(env, taddr);
97
- uint8_t a_sign = float64_is_neg(a);
65
+ }
98
- uint8_t b_sign = float64_is_neg(b);
66
+ switch (mop & (MO_BSWAP | MO_SIZE)) {
99
- uint8_t c_sign = float64_is_neg(c);
67
case MO_UB:
100
- if (is_inf_prod(a, b) && float64_is_infinity(c)) {
68
stb_p(haddr, val);
101
- if ((a_sign ^ b_sign) != c_sign) {
69
break;
102
- ret = make_float64(DF_NAN);
103
- float_raise(float_flag_invalid, fp_status);
104
- return ret;
105
- }
106
- }
107
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
108
- (float64_is_zero(a) && float64_is_infinity(b))) {
109
- ret = make_float64(DF_NAN);
110
- float_raise(float_flag_invalid, fp_status);
111
- return ret;
112
- }
113
- /*
114
- * If none of the above checks are true and C is a NaN,
115
- * a NaN shall be returned
116
- * If A or B are NaN, a NAN shall be returned.
117
- */
118
- if (float64_is_any_nan(a) ||
119
- float64_is_any_nan(b) ||
120
- float64_is_any_nan(c)) {
121
- if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
122
- float_raise(float_flag_invalid, fp_status);
123
- }
124
- if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
125
- float_raise(float_flag_invalid, fp_status);
126
- }
127
- if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
128
- float_raise(float_flag_invalid, fp_status);
129
- }
130
- ret = make_float64(DF_NAN);
131
- return ret;
132
- }
133
- /*
134
- * We have checked for adding opposite-signed infinities.
135
- * Other infinities return infinity with the correct sign
136
- */
137
- if (float64_is_infinity(c)) {
138
- ret = infinite_float64(c_sign);
139
- return ret;
140
- }
141
- if (float64_is_infinity(a) || float64_is_infinity(b)) {
142
- ret = infinite_float64(a_sign ^ b_sign);
143
- return ret;
144
- }
145
- g_assert_not_reached();
146
-}
147
-
148
-static float32 special_fmaf(float32 a, float32 b, float32 c,
149
- float_status *fp_status)
150
-{
151
- float64 aa, bb, cc;
152
- aa = float32_to_float64(a, fp_status);
153
- bb = float32_to_float64(b, fp_status);
154
- cc = float32_to_float64(c, fp_status);
155
- return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
156
-}
157
-
158
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
159
- float_status *fp_status)
160
-{
161
- Accum prod;
162
- Accum acc;
163
- Accum result;
164
- accum_init(&prod);
165
- accum_init(&acc);
166
- accum_init(&result);
167
-
168
- uint8_t a_sign = float32_is_neg(a);
169
- uint8_t b_sign = float32_is_neg(b);
170
- uint8_t c_sign = float32_is_neg(c);
171
- if (float32_is_infinity(a) ||
172
- float32_is_infinity(b) ||
173
- float32_is_infinity(c)) {
174
- return special_fmaf(a, b, c, fp_status);
175
- }
176
- if (float32_is_any_nan(a) ||
177
- float32_is_any_nan(b) ||
178
- float32_is_any_nan(c)) {
179
- return special_fmaf(a, b, c, fp_status);
180
- }
181
- if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
182
- float32 tmp = float32_mul(a, b, fp_status);
183
- tmp = float32_add(tmp, c, fp_status);
184
- return tmp;
185
- }
186
-
187
- /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
188
- prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
189
-
190
- /*
191
- * Note: extracting the mantissa into an int is multiplying by
192
- * 2**23, so adjust here
193
- */
194
- prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
195
- prod.sign = a_sign ^ b_sign;
196
- if (float32_is_zero(a) || float32_is_zero(b)) {
197
- prod.exp = -2 * WAY_BIG_EXP;
198
- }
199
- if ((scale > 0) && float32_is_denormal(c)) {
200
- acc.mant = int128_mul_6464(0, 0);
201
- acc.exp = -WAY_BIG_EXP;
202
- acc.sign = c_sign;
203
- acc.sticky = 1;
204
- result = accum_add(prod, acc);
205
- } else if (!float32_is_zero(c)) {
206
- acc.mant = int128_mul_6464(float32_getmant(c), 1);
207
- acc.exp = float32_getexp(c);
208
- acc.sign = c_sign;
209
- result = accum_add(prod, acc);
210
- } else {
211
- result = prod;
212
- }
213
- result.exp += scale;
214
- return accum_round_float32(result, fp_status);
215
-}
216
217
float64 internal_mpyhh(float64 a, float64 b,
218
unsigned long long int accumulated,

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/s390x/tcg-target.h     |  2 --
 tcg/s390x/tcg-target.c.inc | 59 ++++++++++++++++++++++++++++++++++++--
 2 files changed, 57 insertions(+), 4 deletions(-)

This massive macro is now only used once.
Expand it for use only by float64.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.c | 255 +++++++++++++++++++--------------------
 1 file changed, 127 insertions(+), 128 deletions(-)

8
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
9
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/s390x/tcg-target.h
12
--- a/target/hexagon/fma_emu.c
11
+++ b/tcg/s390x/tcg-target.h
13
+++ b/target/hexagon/fma_emu.c
12
@@ -XXX,XX +XXX,XX @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
14
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
13
/* no need to flush icache explicitly */
14
}
15
}
15
16
16
-#ifdef CONFIG_SOFTMMU
17
/* Return a maximum finite value with the requested sign */
17
#define TCG_TARGET_NEED_LDST_LABELS
18
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
18
-#endif
19
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
19
#define TCG_TARGET_NEED_POOL_LABELS
20
-{ \
20
21
- if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
21
#endif
22
- && ((a.guard | a.round | a.sticky) == 0)) { \
22
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
23
- /* result zero */ \
23
index XXXXXXX..XXXXXXX 100644
24
- switch (fp_status->float_rounding_mode) { \
24
--- a/tcg/s390x/tcg-target.c.inc
25
- case float_round_down: \
25
+++ b/tcg/s390x/tcg-target.c.inc
26
- return zero_##SUFFIX(1); \
26
@@ -XXX,XX +XXX,XX @@
27
- default: \
27
#error "unsupported code generation mode"
28
- return zero_##SUFFIX(0); \
28
#endif
29
- } \
29
30
- } \
30
+#include "../tcg-ldst.c.inc"
31
- /* Normalize right */ \
31
#include "../tcg-pool.c.inc"
32
- /* We want MANTBITS bits of mantissa plus the leading one. */ \
32
#include "elf.h"
33
- /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
33
34
- /* So we need to normalize right while the high word is non-zero and \
34
@@ -XXX,XX +XXX,XX @@ typedef enum S390Opcode {
35
- * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
35
RI_OIHL = 0xa509,
36
- while ((int128_gethi(a.mant) != 0) || \
36
RI_OILH = 0xa50a,
37
- ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
37
RI_OILL = 0xa50b,
38
- a = accum_norm_right(a, 1); \
38
+ RI_TMLL = 0xa701,
39
- } \
39
40
- /* \
40
RIE_CGIJ = 0xec7c,
41
- * OK, now normalize left \
41
RIE_CGRJ = 0xec64,
42
- * We want to normalize left until we have a leading one in bit 24 \
42
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
43
- * Theoretically, we only need to shift a maximum of one to the left if we \
44
- * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
45
- * should be 0 \
46
- */ \
47
- while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
48
- a = accum_norm_left(a); \
49
- } \
50
- /* \
51
- * OK, now we might need to denormalize because of potential underflow. \
52
- * We need to do this before rounding, and rounding might make us normal \
53
- * again \
54
- */ \
55
- while (a.exp <= 0) { \
56
- a = accum_norm_right(a, 1 - a.exp); \
57
- /* \
58
- * Do we have underflow? \
59
- * That's when we get an inexact answer because we ran out of bits \
60
- * in a denormal. \
61
- */ \
62
- if (a.guard || a.round || a.sticky) { \
63
- float_raise(float_flag_underflow, fp_status); \
64
- } \
65
- } \
66
- /* OK, we're relatively canonical... now we need to round */ \
67
- if (a.guard || a.round || a.sticky) { \
68
- float_raise(float_flag_inexact, fp_status); \
69
- switch (fp_status->float_rounding_mode) { \
70
- case float_round_to_zero: \
71
- /* Chop and we're done */ \
72
- break; \
73
- case float_round_up: \
74
- if (a.sign == 0) { \
75
- a.mant = int128_add(a.mant, int128_one()); \
76
- } \
77
- break; \
78
- case float_round_down: \
79
- if (a.sign != 0) { \
80
- a.mant = int128_add(a.mant, int128_one()); \
81
- } \
82
- break; \
83
- default: \
84
- if (a.round || a.sticky) { \
85
- /* round up if guard is 1, down if guard is zero */ \
86
- a.mant = int128_add(a.mant, int128_make64(a.guard)); \
87
- } else if (a.guard) { \
88
- /* exactly .5, round up if odd */ \
89
- a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
90
- } \
91
- break; \
92
- } \
93
- } \
94
- /* \
95
- * OK, now we might have carried all the way up. \
96
- * So we might need to shr once \
97
- * at least we know that the lsb should be zero if we rounded and \
98
- * got a carry out... \
99
- */ \
100
- if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
101
- a = accum_norm_right(a, 1); \
102
- } \
103
- /* Overflow? */ \
104
- if (a.exp >= INF_EXP) { \
105
- /* Yep, inf result */ \
106
- float_raise(float_flag_overflow, fp_status); \
107
- float_raise(float_flag_inexact, fp_status); \
108
- switch (fp_status->float_rounding_mode) { \
109
- case float_round_to_zero: \
110
- return maxfinite_##SUFFIX(a.sign); \
111
- case float_round_up: \
112
- if (a.sign == 0) { \
113
- return infinite_##SUFFIX(a.sign); \
114
- } else { \
115
- return maxfinite_##SUFFIX(a.sign); \
116
- } \
117
- case float_round_down: \
118
- if (a.sign != 0) { \
119
- return infinite_##SUFFIX(a.sign); \
120
- } else { \
121
- return maxfinite_##SUFFIX(a.sign); \
122
- } \
123
- default: \
124
- return infinite_##SUFFIX(a.sign); \
125
- } \
126
- } \
127
- /* Underflow? */ \
128
- if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
129
- /* Leading one means: No, we're normal. So, we should be done... */ \
130
- INTERNAL_TYPE ret; \
131
- ret.i = 0; \
132
- ret.sign = a.sign; \
133
- ret.exp = a.exp; \
134
- ret.mant = int128_getlo(a.mant); \
135
- return ret.i; \
136
- } \
137
- assert(a.exp == 1); \
138
- INTERNAL_TYPE ret; \
139
- ret.i = 0; \
140
- ret.sign = a.sign; \
141
- ret.exp = 0; \
142
- ret.mant = int128_getlo(a.mant); \
143
- return ret.i; \
144
+static float64 accum_round_float64(Accum a, float_status *fp_status)
145
+{
146
+ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
147
+ && ((a.guard | a.round | a.sticky) == 0)) {
148
+ /* result zero */
149
+ switch (fp_status->float_rounding_mode) {
150
+ case float_round_down:
151
+ return zero_float64(1);
152
+ default:
153
+ return zero_float64(0);
154
+ }
155
+ }
156
+ /*
157
+ * Normalize right
158
+ * We want DF_MANTBITS bits of mantissa plus the leading one.
159
+ * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
160
+ * So we need to normalize right while the high word is non-zero and
161
+ * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
162
+ */
163
+ while ((int128_gethi(a.mant) != 0) ||
164
+ ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
165
+ a = accum_norm_right(a, 1);
166
+ }
167
+ /*
168
+ * OK, now normalize left
169
+ * We want to normalize left until we have a leading one in bit 24
170
+ * Theoretically, we only need to shift a maximum of one to the left if we
171
+ * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
172
+ * should be 0
173
+ */
174
+ while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
175
+ a = accum_norm_left(a);
176
+ }
177
+ /*
178
+ * OK, now we might need to denormalize because of potential underflow.
179
+ * We need to do this before rounding, and rounding might make us normal
180
+ * again
181
+ */
182
+ while (a.exp <= 0) {
183
+ a = accum_norm_right(a, 1 - a.exp);
184
+ /*
185
+ * Do we have underflow?
186
+ * That's when we get an inexact answer because we ran out of bits
187
+ * in a denormal.
188
+ */
189
+ if (a.guard || a.round || a.sticky) {
190
+ float_raise(float_flag_underflow, fp_status);
191
+ }
192
+ }
193
+ /* OK, we're relatively canonical... now we need to round */
194
+ if (a.guard || a.round || a.sticky) {
195
+ float_raise(float_flag_inexact, fp_status);
196
+ switch (fp_status->float_rounding_mode) {
197
+ case float_round_to_zero:
198
+ /* Chop and we're done */
199
+ break;
200
+ case float_round_up:
201
+ if (a.sign == 0) {
202
+ a.mant = int128_add(a.mant, int128_one());
203
+ }
204
+ break;
205
+ case float_round_down:
206
+ if (a.sign != 0) {
207
+ a.mant = int128_add(a.mant, int128_one());
208
+ }
209
+ break;
210
+ default:
211
+ if (a.round || a.sticky) {
212
+ /* round up if guard is 1, down if guard is zero */
213
+ a.mant = int128_add(a.mant, int128_make64(a.guard));
214
+ } else if (a.guard) {
215
+ /* exactly .5, round up if odd */
216
+ a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
217
+ }
218
+ break;
219
+ }
220
+ }
221
+ /*
222
+ * OK, now we might have carried all the way up.
223
+ * So we might need to shr once
224
+ * at least we know that the lsb should be zero if we rounded and
225
+ * got a carry out...
226
+ */
227
+ if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
228
+ a = accum_norm_right(a, 1);
229
+ }
230
+ /* Overflow? */
231
+ if (a.exp >= DF_INF_EXP) {
232
+ /* Yep, inf result */
233
+ float_raise(float_flag_overflow, fp_status);
234
+ float_raise(float_flag_inexact, fp_status);
235
+ switch (fp_status->float_rounding_mode) {
236
+ case float_round_to_zero:
237
+ return maxfinite_float64(a.sign);
238
+ case float_round_up:
239
+ if (a.sign == 0) {
240
+ return infinite_float64(a.sign);
241
+ } else {
242
+ return maxfinite_float64(a.sign);
243
+ }
244
+ case float_round_down:
245
+ if (a.sign != 0) {
246
+ return infinite_float64(a.sign);
247
+ } else {
248
+ return maxfinite_float64(a.sign);
249
+ }
250
+ default:
251
+ return infinite_float64(a.sign);
252
+ }
253
+ }
254
+ /* Underflow? */
255
+ if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
256
+ /* Leading one means: No, we're normal. So, we should be done... */
257
+ Double ret;
258
+ ret.i = 0;
259
+ ret.sign = a.sign;
260
+ ret.exp = a.exp;
261
+ ret.mant = int128_getlo(a.mant);
262
+ return ret.i;
263
+ }
264
+ assert(a.exp == 1);
265
+ Double ret;
266
+ ret.i = 0;
267
+ ret.sign = a.sign;
268
+ ret.exp = 0;
269
+ ret.mant = int128_getlo(a.mant);
270
+ return ret.i;
43
}
271
}
44
272
45
#if defined(CONFIG_SOFTMMU)
273
-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
46
-#include "../tcg-ldst.c.inc"
47
-
274
-
48
/* We're expecting to use a 20-bit negative offset on the tlb memory ops. */
275
float64 internal_mpyhh(float64 a, float64 b,
49
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
276
unsigned long long int accumulated,
50
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19));
277
float_status *fp_status)
51
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
52
return true;
53
}
54
#else
55
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld,
56
+ TCGReg addrlo, unsigned a_bits)
57
+{
58
+ unsigned a_mask = (1 << a_bits) - 1;
59
+ TCGLabelQemuLdst *l = new_ldst_label(s);
60
+
61
+ l->is_ld = is_ld;
62
+ l->addrlo_reg = addrlo;
63
+
64
+ /* We are expecting a_bits to max out at 7, much lower than TMLL. */
65
+ tcg_debug_assert(a_bits < 16);
66
+ tcg_out_insn(s, RI, TMLL, addrlo, a_mask);
67
+
68
+ tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */
69
+ l->label_ptr[0] = s->code_ptr;
70
+ s->code_ptr += 1;
71
+
72
+ l->raddr = tcg_splitwx_to_rx(s->code_ptr);
73
+}
74
+
75
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
76
+{
77
+ if (!patch_reloc(l->label_ptr[0], R_390_PC16DBL,
78
+ (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
79
+ return false;
80
+ }
81
+
82
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, l->addrlo_reg);
83
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
84
+
85
+ /* "Tail call" to the helper, with the return address back inline. */
86
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R14, (uintptr_t)l->raddr);
87
+ tgen_gotoi(s, S390_CC_ALWAYS, (const void *)(l->is_ld ? helper_unaligned_ld
88
+ : helper_unaligned_st));
89
+ return true;
90
+}
91
+
92
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
93
+{
94
+ return tcg_out_fail_alignment(s, l);
95
+}
96
+
97
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
98
+{
99
+ return tcg_out_fail_alignment(s, l);
100
+}
101
+
102
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
103
TCGReg *index_reg, tcg_target_long *disp)
104
{
105
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
106
#else
107
TCGReg index_reg;
108
tcg_target_long disp;
109
+ unsigned a_bits = get_alignment_bits(opc);
110
111
+ if (a_bits) {
112
+ tcg_out_test_alignment(s, true, addr_reg, a_bits);
113
+ }
114
tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
115
tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
116
#endif
117
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
118
#else
119
TCGReg index_reg;
120
tcg_target_long disp;
121
+ unsigned a_bits = get_alignment_bits(opc);
122
123
+ if (a_bits) {
124
+ tcg_out_test_alignment(s, false, addr_reg, a_bits);
125
+ }
126
tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
127
tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
128
#endif

Use the "retl" instead of "ret" instruction alias, since we
do not allocate a register window in this function.

Fix the offset to the first stacked parameter, which lies
beyond the register window save area.

Fixes: 95c021dac835 ("linux-user/host/sparc64: Add safe-syscall.inc.S")
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 common-user/host/sparc64/safe-syscall.inc.S | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/common-user/host/sparc64/safe-syscall.inc.S b/common-user/host/sparc64/safe-syscall.inc.S
index XXXXXXX..XXXXXXX 100644
--- a/common-user/host/sparc64/safe-syscall.inc.S
+++ b/common-user/host/sparc64/safe-syscall.inc.S
@@ -XXX,XX +XXX,XX @@
         .type   safe_syscall_end, @function
 
 #define STACK_BIAS  2047
-#define PARAM(N)    STACK_BIAS + N*8
+#define WINDOW_SIZE 16 * 8
+#define PARAM(N)    STACK_BIAS + WINDOW_SIZE + N * 8
 
 /*
  * This is the entry point for making a system call. The calling
@@ -XXX,XX +XXX,XX @@ safe_syscall_end:
         /* code path for having successfully executed the syscall */
         bcs,pn  %xcc, 1f
          nop
-        ret
+        retl
          nop
 
         /* code path when we didn't execute the syscall */

This structure, with bitfields, is incorrect for big-endian.
Use the existing float32_getexp_raw which uses extract32.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.c | 16 +++-------------
 1 file changed, 3 insertions(+), 13 deletions(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ typedef union {
     };
 } Double;
 
-typedef union {
-    float f;
-    uint32_t i;
-    struct {
-        uint32_t mant:23;
-        uint32_t exp:8;
-        uint32_t sign:1;
-    };
-} Float;
-
 static uint64_t float64_getmant(float64 f64)
 {
     Double a = { .i = f64 };
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
 
 int32_t float32_getexp(float32 f32)
 {
-    Float a = { .i = f32 };
+    int exp = float32_getexp_raw(f32);
     if (float32_is_normal(f32)) {
-        return a.exp;
+        return exp;
     }
     if (float32_is_denormal(f32)) {
-        return a.exp + 1;
+        return exp + 1;
     }
     return -1;
 }
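
The change above replaces a host-endian-dependent bitfield union with
float32_getexp_raw(), which pulls the exponent out with a shift and mask.
A C bitfield's position inside the containing word is implementation
defined and differs between little- and big-endian ABIs, while an explicit
extract is layout independent. A minimal standalone sketch of the idea
follows; the local extract32() mirrors the helper QEMU provides in
include/qemu/bitops.h and is defined here only so the example compiles on
its own (an illustration, not code from the series):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same contract as QEMU's extract32(): 'length' bits starting at 'start'. */
static inline uint32_t extract32(uint32_t value, int start, int length)
{
    assert(start >= 0 && length > 0 && length <= 32 - start);
    return (value >> start) & (~0U >> (32 - length));
}

/* IEEE 754 binary32: bits [0,23) mantissa, [23,31) exponent, bit 31 sign. */
static int float32_getexp_raw(uint32_t f32_bits)
{
    return extract32(f32_bits, 23, 8);
}

int main(void)
{
    float f = 1.5f;     /* biased exponent field of 1.5f is 127 */
    uint32_t bits;

    memcpy(&bits, &f, sizeof(bits));
    printf("raw exponent of %g = %d\n", f, float32_getexp_raw(bits));
    return 0;
}
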
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.h     |  2 -
 tcg/ppc/tcg-target.c.inc | 98 ++++++++++++++++++++++++++++++----
 2 files changed, 90 insertions(+), 10 deletions(-)

This structure, with bitfields, is incorrect for big-endian.
Use extract64 and deposit64 instead.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.c | 46 ++++++++++++++--------------------------
 1 file changed, 16 insertions(+), 30 deletions(-)

8
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
9
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/ppc/tcg-target.h
12
--- a/target/hexagon/fma_emu.c
11
+++ b/tcg/ppc/tcg-target.h
13
+++ b/target/hexagon/fma_emu.c
12
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
13
#define TCG_TARGET_DEFAULT_MO (0)
14
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
15
16
-#ifdef CONFIG_SOFTMMU
17
#define TCG_TARGET_NEED_LDST_LABELS
18
-#endif
19
#define TCG_TARGET_NEED_POOL_LABELS
20
21
#endif
22
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
23
index XXXXXXX..XXXXXXX 100644
24
--- a/tcg/ppc/tcg-target.c.inc
25
+++ b/tcg/ppc/tcg-target.c.inc
26
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@
27
15
28
#include "elf.h"
16
#define WAY_BIG_EXP 4096
29
#include "../tcg-pool.c.inc"
17
30
+#include "../tcg-ldst.c.inc"
18
-typedef union {
31
19
- double f;
32
/*
20
- uint64_t i;
33
* Standardize on the _CALL_FOO symbols used by GCC:
21
- struct {
34
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
22
- uint64_t mant:52;
23
- uint64_t exp:11;
24
- uint64_t sign:1;
25
- };
26
-} Double;
27
-
28
static uint64_t float64_getmant(float64 f64)
29
{
30
- Double a = { .i = f64 };
31
+ uint64_t mant = extract64(f64, 0, 52);
32
if (float64_is_normal(f64)) {
33
- return a.mant | 1ULL << 52;
34
+ return mant | 1ULL << 52;
35
}
35
}
36
if (float64_is_zero(f64)) {
37
return 0;
38
}
39
if (float64_is_denormal(f64)) {
40
- return a.mant;
41
+ return mant;
42
}
43
return ~0ULL;
36
}
44
}
37
45
38
-static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
46
int32_t float64_getexp(float64 f64)
39
+static void tcg_out_call_int(TCGContext *s, int lk,
40
+ const tcg_insn_unit *target)
41
{
47
{
42
#ifdef _CALL_AIX
48
- Double a = { .i = f64 };
43
/* Look through the descriptor. If the branch is in range, and we
49
+ int exp = extract64(f64, 52, 11);
44
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
50
if (float64_is_normal(f64)) {
45
51
- return a.exp;
46
if (in_range_b(diff) && toc == (uint32_t)toc) {
52
+ return exp;
47
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
48
- tcg_out_b(s, LK, tgt);
49
+ tcg_out_b(s, lk, tgt);
50
} else {
51
/* Fold the low bits of the constant into the addresses below. */
52
intptr_t arg = (intptr_t)target;
53
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
54
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
55
tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR);
56
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
57
- tcg_out32(s, BCCTR | BO_ALWAYS | LK);
58
+ tcg_out32(s, BCCTR | BO_ALWAYS | lk);
59
}
53
}
60
#elif defined(_CALL_ELF) && _CALL_ELF == 2
54
if (float64_is_denormal(f64)) {
61
intptr_t diff;
55
- return a.exp + 1;
62
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
56
+ return exp + 1;
63
64
diff = tcg_pcrel_diff(s, target);
65
if (in_range_b(diff)) {
66
- tcg_out_b(s, LK, target);
67
+ tcg_out_b(s, lk, target);
68
} else {
69
tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
70
- tcg_out32(s, BCCTR | BO_ALWAYS | LK);
71
+ tcg_out32(s, BCCTR | BO_ALWAYS | lk);
72
}
57
}
73
#else
58
return -1;
74
- tcg_out_b(s, LK, target);
75
+ tcg_out_b(s, lk, target);
76
#endif
77
}
59
}
78
60
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
79
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
61
/* Return a maximum finite value with the requested sign */
80
+{
62
static float64 accum_round_float64(Accum a, float_status *fp_status)
81
+ tcg_out_call_int(s, LK, target);
63
{
82
+}
64
+ uint64_t ret;
83
+
65
+
84
static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = {
66
if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
85
[MO_UB] = LBZX,
67
&& ((a.guard | a.round | a.sticky) == 0)) {
86
[MO_UW] = LHZX,
68
/* result zero */
87
@@ -XXX,XX +XXX,XX @@ static const uint32_t qemu_exts_opc[4] = {
69
@@ -XXX,XX +XXX,XX @@ static float64 accum_round_float64(Accum a, float_status *fp_status)
88
};
70
}
89
71
}
90
#if defined (CONFIG_SOFTMMU)
72
/* Underflow? */
91
-#include "../tcg-ldst.c.inc"
73
- if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
92
-
74
+ ret = int128_getlo(a.mant);
93
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
75
+ if (ret & (1ULL << DF_MANTBITS)) {
94
* int mmu_idx, uintptr_t ra)
76
/* Leading one means: No, we're normal. So, we should be done... */
95
*/
77
- Double ret;
96
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
78
- ret.i = 0;
97
tcg_out_b(s, 0, lb->raddr);
79
- ret.sign = a.sign;
98
return true;
80
- ret.exp = a.exp;
81
- ret.mant = int128_getlo(a.mant);
82
- return ret.i;
83
+ ret = deposit64(ret, 52, 11, a.exp);
84
+ } else {
85
+ assert(a.exp == 1);
86
+ ret = deposit64(ret, 52, 11, 0);
87
}
88
- assert(a.exp == 1);
89
- Double ret;
90
- ret.i = 0;
91
- ret.sign = a.sign;
92
- ret.exp = 0;
93
- ret.mant = int128_getlo(a.mant);
94
- return ret.i;
95
+ ret = deposit64(ret, 63, 1, a.sign);
96
+ return ret;
99
}
97
}
100
+#else
98
101
+
99
float64 internal_mpyhh(float64 a, float64 b,
102
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
103
+ TCGReg addrhi, unsigned a_bits)
104
+{
105
+ unsigned a_mask = (1 << a_bits) - 1;
106
+ TCGLabelQemuLdst *label = new_ldst_label(s);
107
+
108
+ label->is_ld = is_ld;
109
+ label->addrlo_reg = addrlo;
110
+ label->addrhi_reg = addrhi;
111
+
112
+ /* We are expecting a_bits to max out at 7, much lower than ANDI. */
113
+ tcg_debug_assert(a_bits < 16);
114
+ tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, a_mask));
115
+
116
+ label->label_ptr[0] = s->code_ptr;
117
+ tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
118
+
119
+ label->raddr = tcg_splitwx_to_rx(s->code_ptr);
120
+}
121
+
122
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
123
+{
124
+ if (!reloc_pc14(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
125
+ return false;
126
+ }
127
+
128
+ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
129
+ TCGReg arg = TCG_REG_R4;
130
+#ifdef TCG_TARGET_CALL_ALIGN_ARGS
131
+ arg |= 1;
132
+#endif
133
+ if (l->addrlo_reg != arg) {
134
+ tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
135
+ tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
136
+ } else if (l->addrhi_reg != arg + 1) {
137
+ tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
138
+ tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
139
+ } else {
140
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R0, arg);
141
+ tcg_out_mov(s, TCG_TYPE_I32, arg, arg + 1);
142
+ tcg_out_mov(s, TCG_TYPE_I32, arg + 1, TCG_REG_R0);
143
+ }
144
+ } else {
145
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R4, l->addrlo_reg);
146
+ }
147
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, TCG_AREG0);
148
+
149
+ /* "Tail call" to the helper, with the return address back inline. */
150
+ tcg_out_call_int(s, 0, (const void *)(l->is_ld ? helper_unaligned_ld
151
+ : helper_unaligned_st));
152
+ return true;
153
+}
154
+
155
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
156
+{
157
+ return tcg_out_fail_alignment(s, l);
158
+}
159
+
160
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
161
+{
162
+ return tcg_out_fail_alignment(s, l);
163
+}
164
+
165
#endif /* SOFTMMU */
166
167
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
168
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
169
#ifdef CONFIG_SOFTMMU
170
int mem_index;
171
tcg_insn_unit *label_ptr;
172
+#else
173
+ unsigned a_bits;
174
#endif
175
176
datalo = *args++;
177
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
178
179
rbase = TCG_REG_R3;
180
#else /* !CONFIG_SOFTMMU */
181
+ a_bits = get_alignment_bits(opc);
182
+ if (a_bits) {
183
+ tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
184
+ }
185
rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
186
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
187
tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
188
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
189
#ifdef CONFIG_SOFTMMU
190
int mem_index;
191
tcg_insn_unit *label_ptr;
192
+#else
193
+ unsigned a_bits;
194
#endif
195
196
datalo = *args++;
197
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
198
199
rbase = TCG_REG_R3;
200
#else /* !CONFIG_SOFTMMU */
201
+ a_bits = get_alignment_bits(opc);
202
+ if (a_bits) {
203
+ tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
204
+ }
205
rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
206
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
207
tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
208
--
100
--
209
2.25.1
101
2.43.0
210
211
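
The float64 counterpart above goes one step further: besides reading
fields with extract64(), accum_round_float64() now assembles its result
with deposit64() instead of storing through a Double bitfield union.
A standalone sketch of that pattern follows; extract64() and deposit64()
are written out locally to mirror QEMU's include/qemu/bitops.h so the
example is self-contained (an illustration, not code from the series):

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for QEMU's extract64()/deposit64() from qemu/bitops.h. */
static inline uint64_t extract64(uint64_t value, int start, int length)
{
    assert(start >= 0 && length > 0 && length <= 64 - start);
    return (value >> start) & (~0ULL >> (64 - length));
}

static inline uint64_t deposit64(uint64_t value, int start, int length,
                                 uint64_t fieldval)
{
    uint64_t mask;

    assert(start >= 0 && length > 0 && length <= 64 - start);
    mask = (~0ULL >> (64 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}

int main(void)
{
    /* Assemble the binary64 pattern of -1.0: sign 1, exponent 1023, mantissa 0. */
    uint64_t bits = 0;

    bits = deposit64(bits, 0, 52, 0);      /* mantissa */
    bits = deposit64(bits, 52, 11, 1023);  /* biased exponent */
    bits = deposit64(bits, 63, 1, 1);      /* sign */

    printf("bits = 0x%016" PRIx64 "\n", bits);           /* 0xbff0000000000000 */
    printf("exp  = %d\n", (int)extract64(bits, 52, 11));
    return 0;
}
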
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.h     |  2 -
 tcg/aarch64/tcg-target.c.inc | 91 +++++++++++++++++++++++++++++-------
 2 files changed, 74 insertions(+), 19 deletions(-)

No need to open-code 64x64->128-bit multiplication.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.c | 32 +++-----------------------------
 1 file changed, 3 insertions(+), 29 deletions(-)

8
8
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
9
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/aarch64/tcg-target.h
11
--- a/target/hexagon/fma_emu.c
11
+++ b/tcg/aarch64/tcg-target.h
12
+++ b/target/hexagon/fma_emu.c
12
@@ -XXX,XX +XXX,XX @@ typedef enum {
13
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32)
13
14
return -1;
14
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
15
16
-#ifdef CONFIG_SOFTMMU
17
#define TCG_TARGET_NEED_LDST_LABELS
18
-#endif
19
#define TCG_TARGET_NEED_POOL_LABELS
20
21
#endif /* AARCH64_TCG_TARGET_H */
22
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
23
index XXXXXXX..XXXXXXX 100644
24
--- a/tcg/aarch64/tcg-target.c.inc
25
+++ b/tcg/aarch64/tcg-target.c.inc
26
@@ -XXX,XX +XXX,XX @@
27
* See the COPYING file in the top-level directory for details.
28
*/
29
30
+#include "../tcg-ldst.c.inc"
31
#include "../tcg-pool.c.inc"
32
#include "qemu/bitops.h"
33
34
@@ -XXX,XX +XXX,XX @@ typedef enum {
35
I3404_ANDI = 0x12000000,
36
I3404_ORRI = 0x32000000,
37
I3404_EORI = 0x52000000,
38
+ I3404_ANDSI = 0x72000000,
39
40
/* Move wide immediate instructions. */
41
I3405_MOVN = 0x12800000,
42
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_long(TCGContext *s, const tcg_insn_unit *target)
43
if (offset == sextract64(offset, 0, 26)) {
44
tcg_out_insn(s, 3206, B, offset);
45
} else {
46
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target);
47
- tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
48
+ /* Choose X9 as a call-clobbered non-LR temporary. */
49
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X9, (intptr_t)target);
50
+ tcg_out_insn(s, 3207, BR, TCG_REG_X9);
51
}
52
}
15
}
53
16
54
@@ -XXX,XX +XXX,XX @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
17
-static uint32_t int128_getw0(Int128 x)
55
}
56
}
57
58
-#ifdef CONFIG_SOFTMMU
59
-#include "../tcg-ldst.c.inc"
60
+static void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
61
+{
62
+ ptrdiff_t offset = tcg_pcrel_diff(s, target);
63
+ tcg_debug_assert(offset == sextract64(offset, 0, 21));
64
+ tcg_out_insn(s, 3406, ADR, rd, offset);
65
+}
66
67
+#ifdef CONFIG_SOFTMMU
68
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
69
* MemOpIdx oi, uintptr_t ra)
70
*/
71
@@ -XXX,XX +XXX,XX @@ static void * const qemu_st_helpers[MO_SIZE + 1] = {
72
#endif
73
};
74
75
-static inline void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
76
-{
18
-{
77
- ptrdiff_t offset = tcg_pcrel_diff(s, target);
19
- return int128_getlo(x);
78
- tcg_debug_assert(offset == sextract64(offset, 0, 21));
79
- tcg_out_insn(s, 3406, ADR, rd, offset);
80
-}
20
-}
81
-
21
-
82
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
22
-static uint32_t int128_getw1(Int128 x)
23
-{
24
- return int128_getlo(x) >> 32;
25
-}
26
-
27
static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
83
{
28
{
84
MemOpIdx oi = lb->oi;
29
- Int128 a, b;
85
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
30
- uint64_t pp0, pp1a, pp1b, pp1s, pp2;
86
tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
31
+ uint64_t l, h;
32
33
- a = int128_make64(ai);
34
- b = int128_make64(bi);
35
- pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
36
- pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
37
- pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
38
- pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
39
-
40
- pp1s = pp1a + pp1b;
41
- if ((pp1s < pp1a) || (pp1s < pp1b)) {
42
- pp2 += (1ULL << 32);
43
- }
44
- uint64_t ret_low = pp0 + (pp1s << 32);
45
- if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
46
- pp2 += 1;
47
- }
48
-
49
- return int128_make128(ret_low, pp2 + (pp1s >> 32));
50
+ mulu64(&l, &h, ai, bi);
51
+ return int128_make128(l, h);
87
}
52
}
88
53
89
+#else
54
static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
90
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
91
+ unsigned a_bits)
92
+{
93
+ unsigned a_mask = (1 << a_bits) - 1;
94
+ TCGLabelQemuLdst *label = new_ldst_label(s);
95
+
96
+ label->is_ld = is_ld;
97
+ label->addrlo_reg = addr_reg;
98
+
99
+ /* tst addr, #mask */
100
+ tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, a_mask);
101
+
102
+ label->label_ptr[0] = s->code_ptr;
103
+
104
+ /* b.ne slow_path */
105
+ tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
106
+
107
+ label->raddr = tcg_splitwx_to_rx(s->code_ptr);
108
+}
109
+
110
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
111
+{
112
+ if (!reloc_pc19(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
113
+ return false;
114
+ }
115
+
116
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_X1, l->addrlo_reg);
117
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
118
+
119
+ /* "Tail call" to the helper, with the return address back inline. */
120
+ tcg_out_adr(s, TCG_REG_LR, l->raddr);
121
+ tcg_out_goto_long(s, (const void *)(l->is_ld ? helper_unaligned_ld
122
+ : helper_unaligned_st));
123
+ return true;
124
+}
125
+
126
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
127
+{
128
+ return tcg_out_fail_alignment(s, l);
129
+}
130
+
131
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
132
+{
133
+ return tcg_out_fail_alignment(s, l);
134
+}
135
#endif /* CONFIG_SOFTMMU */
136
137
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
138
TCGReg data_r, TCGReg addr_r,
139
TCGType otype, TCGReg off_r)
140
{
141
- /* Byte swapping is left to middle-end expansion. */
142
- tcg_debug_assert((memop & MO_BSWAP) == 0);
143
-
144
switch (memop & MO_SSIZE) {
145
case MO_UB:
146
tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r);
147
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
148
TCGReg data_r, TCGReg addr_r,
149
TCGType otype, TCGReg off_r)
150
{
151
- /* Byte swapping is left to middle-end expansion. */
152
- tcg_debug_assert((memop & MO_BSWAP) == 0);
153
-
154
switch (memop & MO_SIZE) {
155
case MO_8:
156
tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r);
157
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
158
{
159
MemOp memop = get_memop(oi);
160
const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
161
+
162
+ /* Byte swapping is left to middle-end expansion. */
163
+ tcg_debug_assert((memop & MO_BSWAP) == 0);
164
+
165
#ifdef CONFIG_SOFTMMU
166
unsigned mem_index = get_mmuidx(oi);
167
tcg_insn_unit *label_ptr;
168
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
169
add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
170
s->code_ptr, label_ptr);
171
#else /* !CONFIG_SOFTMMU */
172
+ unsigned a_bits = get_alignment_bits(memop);
173
+ if (a_bits) {
174
+ tcg_out_test_alignment(s, true, addr_reg, a_bits);
175
+ }
176
if (USE_GUEST_BASE) {
177
tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
178
TCG_REG_GUEST_BASE, otype, addr_reg);
179
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
180
{
181
MemOp memop = get_memop(oi);
182
const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
183
+
184
+ /* Byte swapping is left to middle-end expansion. */
185
+ tcg_debug_assert((memop & MO_BSWAP) == 0);
186
+
187
#ifdef CONFIG_SOFTMMU
188
unsigned mem_index = get_mmuidx(oi);
189
tcg_insn_unit *label_ptr;
190
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
191
add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64,
192
data_reg, addr_reg, s->code_ptr, label_ptr);
193
#else /* !CONFIG_SOFTMMU */
194
+ unsigned a_bits = get_alignment_bits(memop);
195
+ if (a_bits) {
196
+ tcg_out_test_alignment(s, false, addr_reg, a_bits);
197
+ }
198
if (USE_GUEST_BASE) {
199
tcg_out_qemu_st_direct(s, memop, data_reg,
200
TCG_REG_GUEST_BASE, otype, addr_reg);
201
--
55
--
202
2.25.1
56
2.43.0
203
204
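
For readers following the int128_mul_6464() cleanup above: the removed
code built the 128-bit product from four 32x32 partial products with
manual carry handling, which is exactly the job of a host helper such as
QEMU's mulu64() from include/qemu/host-utils.h. The standalone sketch
below shows the open-coded scheme checked against a reference computed
with the widely available unsigned __int128 compiler extension; it is an
illustration of the arithmetic, not code from the series:

#include <stdint.h>
#include <stdio.h>

/* Open-coded 64x64->128 multiply via 32-bit partial products. */
static void mul64_by_parts(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
{
    uint64_t a0 = (uint32_t)a, a1 = a >> 32;
    uint64_t b0 = (uint32_t)b, b1 = b >> 32;

    uint64_t p00 = a0 * b0;
    uint64_t p01 = a0 * b1;
    uint64_t p10 = a1 * b0;
    uint64_t p11 = a1 * b1;

    uint64_t mid = p01 + p10;                            /* may wrap ...    */
    uint64_t mid_carry = (mid < p01) ? (1ULL << 32) : 0; /* ... into bit 96 */

    *lo = p00 + (mid << 32);
    *hi = p11 + (mid >> 32) + mid_carry + (*lo < p00 ? 1 : 0);
}

int main(void)
{
    uint64_t a = 0xfedcba9876543210ULL, b = 0x0123456789abcdefULL;
    uint64_t lo, hi;

    mul64_by_parts(a, b, &lo, &hi);

    unsigned __int128 ref = (unsigned __int128)a * b;
    printf("parts: hi=%016llx lo=%016llx\n",
           (unsigned long long)hi, (unsigned long long)lo);
    printf("ref:   hi=%016llx lo=%016llx\n",
           (unsigned long long)(ref >> 64), (unsigned long long)ref);
    return 0;
}
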
1
From: WANG Xuerui <git@xen0n.name>
1
Initialize x with accumulated via direct assignment,
2
rather than multiplying by 1.
2
3
3
Apparently we were left behind; just renaming MO_Q to MO_UQ is enough.
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
5
Fixes: fc313c64345453c7 ("exec/memop: Adding signedness to quad definitions")
6
Signed-off-by: WANG Xuerui <git@xen0n.name>
7
Message-Id: <20220206162106.1092364-1-i.qemu@xen0n.name>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
6
---
10
tcg/loongarch64/tcg-target.c.inc | 2 +-
7
target/hexagon/fma_emu.c | 2 +-
11
1 file changed, 1 insertion(+), 1 deletion(-)
8
1 file changed, 1 insertion(+), 1 deletion(-)
12
9
13
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/loongarch64/tcg-target.c.inc
12
--- a/target/hexagon/fma_emu.c
16
+++ b/tcg/loongarch64/tcg-target.c.inc
13
+++ b/target/hexagon/fma_emu.c
17
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
14
@@ -XXX,XX +XXX,XX @@ float64 internal_mpyhh(float64 a, float64 b,
18
case MO_SL:
15
float64_is_infinity(b)) {
19
tcg_out_opc_ldx_w(s, rd, rj, rk);
16
return float64_mul(a, b, fp_status);
20
break;
17
}
21
- case MO_Q:
18
- x.mant = int128_mul_6464(accumulated, 1);
22
+ case MO_UQ:
19
+ x.mant = int128_make64(accumulated);
23
tcg_out_opc_ldx_d(s, rd, rj, rk);
20
x.sticky = sticky;
24
break;
21
prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
25
default:
22
x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
26
--
23
--
27
2.25.1
24
2.43.0
28
29
1
From: Pavel Dovgalyuk <pavel.dovgalyuk@ispras.ru>
1
Convert all targets simultaneously, as the gen_intermediate_code
2
function disappears from the target. While there are possible
3
workarounds, they're larger than simply performing the conversion.
2
4
3
Commit aff0e204cb1f1c036a496c94c15f5dfafcd9b4b4 introduced CF_NOIRQ usage,
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
but one case was forgotten. Record/replay uses one special TB which is not
5
really executed, but used to cause a correct exception in replay mode.
6
This patch adds CF_NOIRQ flag for such block.
7
8
Signed-off-by: Pavel Dovgalyuk <Pavel.Dovgalyuk@ispras.ru>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-Id: <164362834054.1754532.7678416881159817273.stgit@pasha-ThinkPad-X280>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
7
---
13
accel/tcg/cpu-exec.c | 3 ++-
8
include/exec/translator.h | 14 --------------
14
1 file changed, 2 insertions(+), 1 deletion(-)
9
include/hw/core/tcg-cpu-ops.h | 13 +++++++++++++
10
target/alpha/cpu.h | 2 ++
11
target/arm/internals.h | 2 ++
12
target/avr/cpu.h | 2 ++
13
target/hexagon/cpu.h | 2 ++
14
target/hppa/cpu.h | 2 ++
15
target/i386/tcg/helper-tcg.h | 2 ++
16
target/loongarch/internals.h | 2 ++
17
target/m68k/cpu.h | 2 ++
18
target/microblaze/cpu.h | 2 ++
19
target/mips/tcg/tcg-internal.h | 2 ++
20
target/openrisc/cpu.h | 2 ++
21
target/ppc/cpu.h | 2 ++
22
target/riscv/cpu.h | 3 +++
23
target/rx/cpu.h | 2 ++
24
target/s390x/s390x-internal.h | 2 ++
25
target/sh4/cpu.h | 2 ++
26
target/sparc/cpu.h | 2 ++
27
target/tricore/cpu.h | 2 ++
28
target/xtensa/cpu.h | 2 ++
29
accel/tcg/cpu-exec.c | 8 +++++---
30
accel/tcg/translate-all.c | 8 +++++---
31
target/alpha/cpu.c | 1 +
32
target/alpha/translate.c | 4 ++--
33
target/arm/cpu.c | 1 +
34
target/arm/tcg/cpu-v7m.c | 1 +
35
target/arm/tcg/translate.c | 5 ++---
36
target/avr/cpu.c | 1 +
37
target/avr/translate.c | 6 +++---
38
target/hexagon/cpu.c | 1 +
39
target/hexagon/translate.c | 4 ++--
40
target/hppa/cpu.c | 1 +
41
target/hppa/translate.c | 4 ++--
42
target/i386/tcg/tcg-cpu.c | 1 +
43
target/i386/tcg/translate.c | 5 ++---
44
target/loongarch/cpu.c | 1 +
45
target/loongarch/tcg/translate.c | 4 ++--
46
target/m68k/cpu.c | 1 +
47
target/m68k/translate.c | 4 ++--
48
target/microblaze/cpu.c | 1 +
49
target/microblaze/translate.c | 4 ++--
50
target/mips/cpu.c | 1 +
51
target/mips/tcg/translate.c | 4 ++--
52
target/openrisc/cpu.c | 1 +
53
target/openrisc/translate.c | 4 ++--
54
target/ppc/cpu_init.c | 1 +
55
target/ppc/translate.c | 4 ++--
56
target/riscv/tcg/tcg-cpu.c | 1 +
57
target/riscv/translate.c | 4 ++--
58
target/rx/cpu.c | 1 +
59
target/rx/translate.c | 4 ++--
60
target/s390x/cpu.c | 1 +
61
target/s390x/tcg/translate.c | 4 ++--
62
target/sh4/cpu.c | 1 +
63
target/sh4/translate.c | 4 ++--
64
target/sparc/cpu.c | 1 +
65
target/sparc/translate.c | 4 ++--
66
target/tricore/cpu.c | 1 +
67
target/tricore/translate.c | 5 ++---
68
target/xtensa/cpu.c | 1 +
69
target/xtensa/translate.c | 4 ++--
70
62 files changed, 121 insertions(+), 62 deletions(-)
15
71
72
diff --git a/include/exec/translator.h b/include/exec/translator.h
73
index XXXXXXX..XXXXXXX 100644
74
--- a/include/exec/translator.h
75
+++ b/include/exec/translator.h
76
@@ -XXX,XX +XXX,XX @@
77
#include "qemu/bswap.h"
78
#include "exec/vaddr.h"
79
80
-/**
81
- * gen_intermediate_code
82
- * @cpu: cpu context
83
- * @tb: translation block
84
- * @max_insns: max number of instructions to translate
85
- * @pc: guest virtual program counter address
86
- * @host_pc: host physical program counter address
87
- *
88
- * This function must be provided by the target, which should create
89
- * the target-specific DisasContext, and then invoke translator_loop.
90
- */
91
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
92
- vaddr pc, void *host_pc);
93
-
94
/**
95
* DisasJumpType:
96
* @DISAS_NEXT: Next instruction in program order.
97
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
98
index XXXXXXX..XXXXXXX 100644
99
--- a/include/hw/core/tcg-cpu-ops.h
100
+++ b/include/hw/core/tcg-cpu-ops.h
101
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
102
* Called when the first CPU is realized.
103
*/
104
void (*initialize)(void);
105
+ /**
106
+ * @translate_code: Translate guest instructions to TCGOps
107
+ * @cpu: cpu context
108
+ * @tb: translation block
109
+ * @max_insns: max number of instructions to translate
110
+ * @pc: guest virtual program counter address
111
+ * @host_pc: host physical program counter address
112
+ *
113
+ * This function must be provided by the target, which should create
114
+ * the target-specific DisasContext, and then invoke translator_loop.
115
+ */
116
+ void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
117
+ int *max_insns, vaddr pc, void *host_pc);
118
/**
119
* @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
120
*
121
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
122
index XXXXXXX..XXXXXXX 100644
123
--- a/target/alpha/cpu.h
124
+++ b/target/alpha/cpu.h
125
@@ -XXX,XX +XXX,XX @@ enum {
126
};
127
128
void alpha_translate_init(void);
129
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
130
+ int *max_insns, vaddr pc, void *host_pc);
131
132
#define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
133
134
diff --git a/target/arm/internals.h b/target/arm/internals.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/target/arm/internals.h
137
+++ b/target/arm/internals.h
138
@@ -XXX,XX +XXX,XX @@ void init_cpreg_list(ARMCPU *cpu);
139
140
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
141
void arm_translate_init(void);
142
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
143
+ int *max_insns, vaddr pc, void *host_pc);
144
145
void arm_cpu_register_gdb_commands(ARMCPU *cpu);
146
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
147
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
148
index XXXXXXX..XXXXXXX 100644
149
--- a/target/avr/cpu.h
150
+++ b/target/avr/cpu.h
151
@@ -XXX,XX +XXX,XX @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
152
}
153
154
void avr_cpu_tcg_init(void);
155
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
156
+ int *max_insns, vaddr pc, void *host_pc);
157
158
int cpu_avr_exec(CPUState *cpu);
159
160
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
161
index XXXXXXX..XXXXXXX 100644
162
--- a/target/hexagon/cpu.h
163
+++ b/target/hexagon/cpu.h
164
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
165
typedef HexagonCPU ArchCPU;
166
167
void hexagon_translate_init(void);
168
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
169
+ int *max_insns, vaddr pc, void *host_pc);
170
171
#include "exec/cpu-all.h"
172
173
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/hppa/cpu.h
176
+++ b/target/hppa/cpu.h
177
@@ -XXX,XX +XXX,XX @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
178
}
179
180
void hppa_translate_init(void);
181
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
182
+ int *max_insns, vaddr pc, void *host_pc);
183
184
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
185
186
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
187
index XXXXXXX..XXXXXXX 100644
188
--- a/target/i386/tcg/helper-tcg.h
189
+++ b/target/i386/tcg/helper-tcg.h
190
@@ -XXX,XX +XXX,XX @@ static inline target_long lshift(target_long x, int n)
191
192
/* translate.c */
193
void tcg_x86_init(void);
194
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
195
+ int *max_insns, vaddr pc, void *host_pc);
196
197
/* excp_helper.c */
198
G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
199
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
200
index XXXXXXX..XXXXXXX 100644
201
--- a/target/loongarch/internals.h
202
+++ b/target/loongarch/internals.h
203
@@ -XXX,XX +XXX,XX @@
204
#define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)
205
206
void loongarch_translate_init(void);
207
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
208
+ int *max_insns, vaddr pc, void *host_pc);
209
210
void G_NORETURN do_raise_exception(CPULoongArchState *env,
211
uint32_t exception,
212
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
213
index XXXXXXX..XXXXXXX 100644
214
--- a/target/m68k/cpu.h
215
+++ b/target/m68k/cpu.h
216
@@ -XXX,XX +XXX,XX @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
217
int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
218
219
void m68k_tcg_init(void);
220
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
221
+ int *max_insns, vaddr pc, void *host_pc);
222
void m68k_cpu_init_gdb(M68kCPU *cpu);
223
uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
224
void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
225
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
226
index XXXXXXX..XXXXXXX 100644
227
--- a/target/microblaze/cpu.h
228
+++ b/target/microblaze/cpu.h
229
@@ -XXX,XX +XXX,XX @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
230
}
231
232
void mb_tcg_init(void);
233
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
234
+ int *max_insns, vaddr pc, void *host_pc);
235
236
#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU
237
238
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
239
index XXXXXXX..XXXXXXX 100644
240
--- a/target/mips/tcg/tcg-internal.h
241
+++ b/target/mips/tcg/tcg-internal.h
242
@@ -XXX,XX +XXX,XX @@
243
#include "cpu.h"
244
245
void mips_tcg_init(void);
246
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
247
+ int *max_insns, vaddr pc, void *host_pc);
248
249
void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
250
G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
251
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
252
index XXXXXXX..XXXXXXX 100644
253
--- a/target/openrisc/cpu.h
254
+++ b/target/openrisc/cpu.h
255
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
256
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
257
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
258
void openrisc_translate_init(void);
259
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
260
+ int *max_insns, vaddr pc, void *host_pc);
261
int print_insn_or1k(bfd_vma addr, disassemble_info *info);
262
263
#ifndef CONFIG_USER_ONLY
264
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
265
index XXXXXXX..XXXXXXX 100644
266
--- a/target/ppc/cpu.h
267
+++ b/target/ppc/cpu.h
268
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ppc_cpu;
269
270
/*****************************************************************************/
271
void ppc_translate_init(void);
272
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
273
+ int *max_insns, vaddr pc, void *host_pc);
274
275
#if !defined(CONFIG_USER_ONLY)
276
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
277
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
278
index XXXXXXX..XXXXXXX 100644
279
--- a/target/riscv/cpu.h
280
+++ b/target/riscv/cpu.h
281
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
282
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
283
284
void riscv_translate_init(void);
285
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
286
+ int *max_insns, vaddr pc, void *host_pc);
287
+
288
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
289
uint32_t exception, uintptr_t pc);
290
291
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
292
index XXXXXXX..XXXXXXX 100644
293
--- a/target/rx/cpu.h
294
+++ b/target/rx/cpu.h
295
@@ -XXX,XX +XXX,XX @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
296
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
297
298
void rx_translate_init(void);
299
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
300
+ int *max_insns, vaddr pc, void *host_pc);
301
void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
302
303
#include "exec/cpu-all.h"
304
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
305
index XXXXXXX..XXXXXXX 100644
306
--- a/target/s390x/s390x-internal.h
307
+++ b/target/s390x/s390x-internal.h
308
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
309
310
/* translate.c */
311
void s390x_translate_init(void);
312
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
313
+ int *max_insns, vaddr pc, void *host_pc);
314
void s390x_restore_state_to_opc(CPUState *cs,
315
const TranslationBlock *tb,
316
const uint64_t *data);
317
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
318
index XXXXXXX..XXXXXXX 100644
319
--- a/target/sh4/cpu.h
320
+++ b/target/sh4/cpu.h
321
@@ -XXX,XX +XXX,XX @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
322
uintptr_t retaddr);
323
324
void sh4_translate_init(void);
325
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
326
+ int *max_insns, vaddr pc, void *host_pc);
327
328
#if !defined(CONFIG_USER_ONLY)
329
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
330
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
331
index XXXXXXX..XXXXXXX 100644
332
--- a/target/sparc/cpu.h
333
+++ b/target/sparc/cpu.h
334
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
335
336
/* translate.c */
337
void sparc_tcg_init(void);
338
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
339
+ int *max_insns, vaddr pc, void *host_pc);
340
341
/* fop_helper.c */
342
target_ulong cpu_get_fsr(CPUSPARCState *);
343
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
344
index XXXXXXX..XXXXXXX 100644
345
--- a/target/tricore/cpu.h
346
+++ b/target/tricore/cpu.h
347
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)
348
349
void cpu_state_reset(CPUTriCoreState *s);
350
void tricore_tcg_init(void);
351
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
352
+ int *max_insns, vaddr pc, void *host_pc);
353
354
static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
355
uint64_t *cs_base, uint32_t *flags)
356
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
357
index XXXXXXX..XXXXXXX 100644
358
--- a/target/xtensa/cpu.h
359
+++ b/target/xtensa/cpu.h
360
@@ -XXX,XX +XXX,XX @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
361
362
void xtensa_collect_sr_names(const XtensaConfig *config);
363
void xtensa_translate_init(void);
364
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
365
+ int *max_insns, vaddr pc, void *host_pc);
366
void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
367
void xtensa_breakpoint_handler(CPUState *cs);
368
void xtensa_register_core(XtensaConfigList *node);
16
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
369
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
17
index XXXXXXX..XXXXXXX 100644
370
index XXXXXXX..XXXXXXX 100644
18
--- a/accel/tcg/cpu-exec.c
371
--- a/accel/tcg/cpu-exec.c
19
+++ b/accel/tcg/cpu-exec.c
372
+++ b/accel/tcg/cpu-exec.c
20
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
373
@@ -XXX,XX +XXX,XX @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
21
if (replay_has_exception()
374
22
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
375
if (!tcg_target_initialized) {
23
/* Execute just one insn to trigger exception pending in the log */
376
/* Check mandatory TCGCPUOps handlers */
24
- cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1;
377
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
25
+ cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
378
#ifndef CONFIG_USER_ONLY
26
+ | CF_NOIRQ | 1;
379
- assert(cpu->cc->tcg_ops->cpu_exec_halt);
27
}
380
- assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
381
+ assert(tcg_ops->cpu_exec_halt);
382
+ assert(tcg_ops->cpu_exec_interrupt);
383
#endif /* !CONFIG_USER_ONLY */
384
- cpu->cc->tcg_ops->initialize();
385
+ assert(tcg_ops->translate_code);
386
+ tcg_ops->initialize();
387
tcg_target_initialized = true;
388
}
389
390
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
tcg_func_start(tcg_ctx);
- tcg_ctx->cpu = env_cpu(env);
- gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
+ CPUState *cs = env_cpu(env);
+ tcg_ctx->cpu = cs;
+ cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);
+
assert(tb->size != 0);
tcg_ctx->cpu = NULL;
*max_insns = tb->icount;
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
/*
* Overflow of code_gen_buffer, or the current slice of it.
*
- * TODO: We don't need to re-do gen_intermediate_code, nor
+ * TODO: We don't need to re-do tcg_ops->translate_code, nor
* should we re-do the tcg optimization currently hidden
* inside tcg_gen_code. All that should be required is to
* flush the TBs, allocate a new TB, re-initialize it per

diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
static const TCGCPUOps alpha_tcg_ops = {
.initialize = alpha_translate_init,
+ .translate_code = alpha_translate_code,
.synchronize_from_tb = alpha_cpu_synchronize_from_tb,
.restore_state_to_opc = alpha_restore_state_to_opc,

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
.tb_stop = alpha_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps arm_sysemu_ops = {
#ifdef CONFIG_TCG
static const TCGCPUOps arm_tcg_ops = {
.initialize = arm_translate_init,
+ .translate_code = arm_translate_code,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.debug_excp_handler = arm_debug_excp_handler,
.restore_state_to_opc = arm_restore_state_to_opc,

diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/cpu-v7m.c
+++ b/target/arm/tcg/cpu-v7m.c
@@ -XXX,XX +XXX,XX @@ static void cortex_m55_initfn(Object *obj)
static const TCGCPUOps arm_v7m_tcg_ops = {
.initialize = arm_translate_init,
+ .translate_code = arm_translate_code,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.debug_excp_handler = arm_debug_excp_handler,
.restore_state_to_opc = arm_restore_state_to_opc,

diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
.tb_stop = arm_tr_tb_stop,
};
-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = { };
const TranslatorOps *ops = &arm_translator_ops;

diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {
static const TCGCPUOps avr_tcg_ops = {
.initialize = avr_cpu_tcg_init,
+ .translate_code = avr_cpu_translate_code,
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
.restore_state_to_opc = avr_restore_state_to_opc,
.cpu_exec_interrupt = avr_cpu_exec_interrupt,

diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
*
* - translate()
* - canonicalize_skip()
- * - gen_intermediate_code()
+ * - translate_code()
* - restore_state_to_opc()
*
*/
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
.tb_stop = avr_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = { };
translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);

diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/cpu.c
+++ b/target/hexagon/cpu.c
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)
static const TCGCPUOps hexagon_tcg_ops = {
.initialize = hexagon_translate_init,
+ .translate_code = hexagon_translate_code,
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
.restore_state_to_opc = hexagon_restore_state_to_opc,
};

diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
.tb_stop = hexagon_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
static const TCGCPUOps hppa_tcg_ops = {
.initialize = hppa_translate_init,
+ .translate_code = hppa_translate_code,
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
.restore_state_to_opc = hppa_restore_state_to_opc,

diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
#endif
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx = { };
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)
static const TCGCPUOps x86_tcg_ops = {
.initialize = tcg_x86_init,
+ .translate_code = x86_translate_code,
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
.restore_state_to_opc = x86_restore_state_to_opc,
.cpu_exec_enter = x86_cpu_exec_enter,

diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
.tb_stop = i386_tr_tb_stop,
};
-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
static const TCGCPUOps loongarch_tcg_ops = {
.initialize = loongarch_translate_init,
+ .translate_code = loongarch_translate_code,
.synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
.restore_state_to_opc = loongarch_restore_state_to_opc,

diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/tcg/translate.c
+++ b/target/loongarch/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
.tb_stop = loongarch_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {
static const TCGCPUOps m68k_tcg_ops = {
.initialize = m68k_tcg_init,
+ .translate_code = m68k_translate_code,
.restore_state_to_opc = m68k_restore_state_to_opc,
#ifndef CONFIG_USER_ONLY

diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
.tb_stop = m68k_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);

diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {
static const TCGCPUOps mb_tcg_ops = {
.initialize = mb_tcg_init,
+ .translate_code = mb_translate_code,
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
.restore_state_to_opc = mb_restore_state_to_opc,

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
.tb_stop = mb_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);

diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static const Property mips_cpu_properties[] = {
#include "hw/core/tcg-cpu-ops.h"
static const TCGCPUOps mips_tcg_ops = {
.initialize = mips_tcg_init,
+ .translate_code = mips_translate_code,
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
.restore_state_to_opc = mips_restore_state_to_opc,

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
.tb_stop = mips_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {
static const TCGCPUOps openrisc_tcg_ops = {
.initialize = openrisc_translate_init,
+ .translate_code = openrisc_translate_code,
.synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
.restore_state_to_opc = openrisc_restore_state_to_opc,

diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
.tb_stop = openrisc_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {
static const TCGCPUOps ppc_tcg_ops = {
.initialize = ppc_translate_init,
+ .translate_code = ppc_translate_code,
.restore_state_to_opc = ppc_restore_state_to_opc,
#ifdef CONFIG_USER_ONLY

diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
.tb_stop = ppc_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_restore_state_to_opc(CPUState *cs,
static const TCGCPUOps riscv_tcg_ops = {
.initialize = riscv_translate_init,
+ .translate_code = riscv_translate_code,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
.restore_state_to_opc = riscv_restore_state_to_opc,

diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
.tb_stop = riscv_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {
static const TCGCPUOps rx_tcg_ops = {
.initialize = rx_translate_init,
+ .translate_code = rx_translate_code,
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
.restore_state_to_opc = rx_restore_state_to_opc,
.tlb_fill = rx_cpu_tlb_fill,

diff --git a/target/rx/translate.c b/target/rx/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
.tb_stop = rx_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
static const TCGCPUOps s390_tcg_ops = {
.initialize = s390x_translate_init,
+ .translate_code = s390x_translate_code,
.restore_state_to_opc = s390x_restore_state_to_opc,
#ifdef CONFIG_USER_ONLY

diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
.disas_log = s390x_tr_disas_log,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
static const TCGCPUOps superh_tcg_ops = {
.initialize = sh4_translate_init,
+ .translate_code = sh4_translate_code,
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
.restore_state_to_opc = superh_restore_state_to_opc,

diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
.tb_stop = sh4_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
static const TCGCPUOps sparc_tcg_ops = {
.initialize = sparc_tcg_init,
+ .translate_code = sparc_translate_code,
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
.restore_state_to_opc = sparc_restore_state_to_opc,

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
.tb_stop = sparc_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = {};

diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {
static const TCGCPUOps tricore_tcg_ops = {
.initialize = tricore_tcg_init,
+ .translate_code = tricore_translate_code,
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
.restore_state_to_opc = tricore_restore_state_to_opc,
.tlb_fill = tricore_cpu_tlb_fill,

diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
.tb_stop = tricore_tr_tb_stop,
};
-
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
translator_loop(cs, tb, max_insns, pc, host_pc,

diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
static const TCGCPUOps xtensa_tcg_ops = {
.initialize = xtensa_translate_init,
+ .translate_code = xtensa_translate_code,
.debug_excp_handler = xtensa_breakpoint_handler,
.restore_state_to_opc = xtensa_restore_state_to_opc,

diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
.tb_stop = xtensa_tr_tb_stop,
};
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = {};
translator_loop(cpu, tb, max_insns, pc, host_pc,
--
2.43.0