The following changes since commit aa3a285b5bc56a4208b3b57d4a55291e9c260107:

  Merge tag 'mem-2024-12-21' of https://github.com/davidhildenbrand/qemu into staging (2024-12-22 14:33:27 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241224

for you to fetch changes up to e4a8e093dc74be049f4829831dce76e5edab0003:

  accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core (2024-12-24 08:32:15 -0800)

----------------------------------------------------------------
tcg/optimize: Remove in-flight mask data from OptContext
fpu: Add float*_muladd_scalbn
fpu: Remove float_muladd_halve_result
fpu: Add float_round_nearest_even_max
fpu: Add float_muladd_suppress_add_product_zero
target/hexagon: Use float32_muladd
accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

----------------------------------------------------------------
Ilya Leoshkevich (1):
      tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

Pierrick Bouvier (1):
      plugins: optimize cpu_index code generation

Richard Henderson (70):
      tcg/optimize: Split out finish_bb, finish_ebb
      tcg/optimize: Split out fold_affected_mask
      tcg/optimize: Copy mask writeback to fold_masks
      tcg/optimize: Split out fold_masks_zs
      tcg/optimize: Augment s_mask from z_mask in fold_masks_zs
      tcg/optimize: Change representation of s_mask
      tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2
      tcg/optimize: Introduce const value accessors for TempOptInfo
      tcg/optimize: Use fold_masks_zs in fold_and
      tcg/optimize: Use fold_masks_zs in fold_andc
      tcg/optimize: Use fold_masks_zs in fold_bswap
      tcg/optimize: Use fold_masks_zs in fold_count_zeros
      tcg/optimize: Use fold_masks_z in fold_ctpop
      tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
      tcg/optimize: Compute sign mask in fold_deposit
      tcg/optimize: Use finish_folding in fold_divide
      tcg/optimize: Use finish_folding in fold_dup, fold_dup2
      tcg/optimize: Use fold_masks_s in fold_eqv
      tcg/optimize: Use fold_masks_z in fold_extract
      tcg/optimize: Use finish_folding in fold_extract2
      tcg/optimize: Use fold_masks_zs in fold_exts
      tcg/optimize: Use fold_masks_z in fold_extu
      tcg/optimize: Use fold_masks_zs in fold_movcond
      tcg/optimize: Use finish_folding in fold_mul*
      tcg/optimize: Use fold_masks_s in fold_nand
      tcg/optimize: Use fold_masks_z in fold_neg_no_const
      tcg/optimize: Use fold_masks_s in fold_nor
      tcg/optimize: Use fold_masks_s in fold_not
      tcg/optimize: Use fold_masks_zs in fold_or
      tcg/optimize: Use fold_masks_zs in fold_orc
      tcg/optimize: Use fold_masks_zs in fold_qemu_ld
      tcg/optimize: Return true from fold_qemu_st, fold_tcg_st
      tcg/optimize: Use finish_folding in fold_remainder
      tcg/optimize: Distinguish simplification in fold_setcond_zmask
      tcg/optimize: Use fold_masks_z in fold_setcond
      tcg/optimize: Use fold_masks_s in fold_negsetcond
      tcg/optimize: Use fold_masks_z in fold_setcond2
      tcg/optimize: Use finish_folding in fold_cmp_vec
      tcg/optimize: Use finish_folding in fold_cmpsel_vec
      tcg/optimize: Use fold_masks_zs in fold_sextract
      tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
      tcg/optimize: Simplify sign bit test in fold_shift
      tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec
      tcg/optimize: Use fold_masks_zs in fold_tcg_ld
      tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy
      tcg/optimize: Use fold_masks_zs in fold_xor
      tcg/optimize: Use finish_folding in fold_bitsel_vec
      tcg/optimize: Use finish_folding as default in tcg_optimize
      tcg/optimize: Remove z_mask, s_mask from OptContext
      tcg/optimize: Re-enable sign-mask optimizations
      tcg/optimize: Move fold_bitsel_vec into alphabetic sort
      tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
      softfloat: Add float{16,32,64}_muladd_scalbn
      target/arm: Use float*_muladd_scalbn
      target/sparc: Use float*_muladd_scalbn
      softfloat: Remove float_muladd_halve_result
      softfloat: Add float_round_nearest_even_max
      softfloat: Add float_muladd_suppress_add_product_zero
      target/hexagon: Use float32_mul in helper_sfmpy
      target/hexagon: Use float32_muladd for helper_sffma
      target/hexagon: Use float32_muladd for helper_sffms
      target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc
      target/hexagon: Use float32_muladd for helper_sffm[as]_lib
      target/hexagon: Remove internal_fmafx
      target/hexagon: Expand GEN_XF_ROUND
      target/hexagon: Remove Float
      target/hexagon: Remove Double
      target/hexagon: Use mulu64 for int128_mul_6464
      target/hexagon: Simplify internal_mpyhh setup
      accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

 include/exec/translator.h | 14 -
 include/fpu/softfloat-types.h | 2 +
 include/fpu/softfloat.h | 14 +-
 include/hw/core/tcg-cpu-ops.h | 13 +
 target/alpha/cpu.h | 2 +
 target/arm/internals.h | 2 +
 target/avr/cpu.h | 2 +
 target/hexagon/cpu.h | 2 +
 target/hexagon/fma_emu.h | 3 -
 target/hppa/cpu.h | 2 +
 target/i386/tcg/helper-tcg.h | 2 +
 target/loongarch/internals.h | 2 +
 target/m68k/cpu.h | 2 +
 target/microblaze/cpu.h | 2 +
 target/mips/tcg/tcg-internal.h | 2 +
 target/openrisc/cpu.h | 2 +
 target/ppc/cpu.h | 2 +
 target/riscv/cpu.h | 3 +
 target/rx/cpu.h | 2 +
 target/s390x/s390x-internal.h | 2 +
 target/sh4/cpu.h | 2 +
 target/sparc/cpu.h | 2 +
 target/sparc/helper.h | 4 +-
 target/tricore/cpu.h | 2 +
 target/xtensa/cpu.h | 2 +
 accel/tcg/cpu-exec.c | 8 +-
 accel/tcg/plugin-gen.c | 9 +
 accel/tcg/translate-all.c | 8 +-
 fpu/softfloat.c | 63 +--
 target/alpha/cpu.c | 1 +
 target/alpha/translate.c | 4 +-
 target/arm/cpu.c | 1 +
 target/arm/tcg/cpu-v7m.c | 1 +
 target/arm/tcg/helper-a64.c | 6 +-
 target/arm/tcg/translate.c | 5 +-
 target/avr/cpu.c | 1 +
 target/avr/translate.c | 6 +-
 target/hexagon/cpu.c | 1 +
 target/hexagon/fma_emu.c | 496 ++++++---------------
 target/hexagon/op_helper.c | 125 ++----
 target/hexagon/translate.c | 4 +-
 target/hppa/cpu.c | 1 +
 target/hppa/translate.c | 4 +-
 target/i386/tcg/tcg-cpu.c | 1 +
 target/i386/tcg/translate.c | 5 +-
 target/loongarch/cpu.c | 1 +
 target/loongarch/tcg/translate.c | 4 +-
 target/m68k/cpu.c | 1 +
 target/m68k/translate.c | 4 +-
 target/microblaze/cpu.c | 1 +
 target/microblaze/translate.c | 4 +-
 target/mips/cpu.c | 1 +
 target/mips/tcg/translate.c | 4 +-
 target/openrisc/cpu.c | 1 +
 target/openrisc/translate.c | 4 +-
 target/ppc/cpu_init.c | 1 +
 target/ppc/translate.c | 4 +-
 target/riscv/tcg/tcg-cpu.c | 1 +
 target/riscv/translate.c | 4 +-
 target/rx/cpu.c | 1 +
 target/rx/translate.c | 4 +-
 target/s390x/cpu.c | 1 +
 target/s390x/tcg/translate.c | 4 +-
 target/sh4/cpu.c | 1 +
 target/sh4/translate.c | 4 +-
 target/sparc/cpu.c | 1 +
 target/sparc/fop_helper.c | 8 +-
 target/sparc/translate.c | 84 ++--
 target/tricore/cpu.c | 1 +
 target/tricore/translate.c | 5 +-
 target/xtensa/cpu.c | 1 +
 target/xtensa/translate.c | 4 +-
 tcg/optimize.c | 857 +++++++++++++++++++-----------------
 tests/tcg/multiarch/system/memory.c | 9 +-
 fpu/softfloat-parts.c.inc | 16 +-
 75 files changed, 866 insertions(+), 1009 deletions(-)
From: Ilya Leoshkevich <iii@linux.ibm.com>

make check-tcg fails on Fedora with the following error message:

  alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
  qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
     17 | #include <inttypes.h>
        |          ^~~~~~~~~~~~
  compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tests/tcg/multiarch/system/memory.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -XXX,XX +XXX,XX @@
 
 #include <stdint.h>
 #include <stdbool.h>
-#include <inttypes.h>
 #include <minilib.h>
 
 #ifndef CHECK_UNALIGNED
@@ -XXX,XX +XXX,XX @@ int main(void)
     int i;
     bool ok = true;
 
-    ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
-    ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+    ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+    ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);
 
     /* Run through the unsigned tests first */
     for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -XXX,XX +XXX,XX @@ int main(void)
         ok = do_signed_reads(true);
     }
 
-    ml_printf("Test data read: %"PRId32"\n", test_read_count);
-    ml_printf("Test data write: %"PRId32"\n", test_write_count);
+    ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+    ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
     ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
     return ok ? 0 : -1;
 }
--
2.43.0
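The same idiom can be exercised outside of QEMU. Below is a minimal stand-alone C sketch
(illustrative only, not part of the series; the variable names are hypothetical) of printing
fixed-width and pointer-sized values without <inttypes.h>, by casting to unsigned long exactly
as the patch above does:

    /* Sketch: the cast-to-unsigned-long replacement for PRIxPTR / PRId32. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t read_count = 42;
        uintptr_t start = (uintptr_t)&read_count;

        /* Instead of "%"PRIxPTR and "%"PRId32 from <inttypes.h>: */
        printf("Test data start: 0x%lx\n", (unsigned long)start);
        printf("Test data read: %lu\n", (unsigned long)read_count);
        return 0;
    }

The trade-off is the one named in the commit message: the format strings no longer adapt to the
exact integer width, but the test no longer needs cross-glibc headers to build.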
From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
 accel/tcg/plugin-gen.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void gen_disable_mem_helper(void)
 
 static TCGv_i32 gen_cpu_index(void)
 {
+    /*
+     * Optimize when we run with a single vcpu. All values using cpu_index,
+     * including scoreboard index, will be optimized out.
+     * User-mode calls tb_flush when setting this flag. In system-mode, all
+     * vcpus are created before generating code.
+     */
+    if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+        return tcg_constant_i32(current_cpu->cpu_index);
+    }
     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
     tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
--
2.43.0
Call them directly from the opcode switch statement in tcg_optimize,
rather than in finish_folding based on opcode flags. Adjust folding
of conditional branches to match.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 47 +++++++++++++++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
     }
 }
 
+static void finish_bb(OptContext *ctx)
+{
+    /* We only optimize memory barriers across basic blocks. */
+    ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+    finish_bb(ctx);
+    /* We only optimize across extended basic blocks. */
+    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+    remove_mem_copy_all(ctx);
+}
+
 static void finish_folding(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     int i, nb_oargs;
 
-    /*
-     * We only optimize extended basic blocks.  If the opcode ends a BB
-     * and is not a conditional branch, reset all temp data.
-     */
-    if (def->flags & TCG_OPF_BB_END) {
-        ctx->prev_mb = NULL;
-        if (!(def->flags & TCG_OPF_COND_BRANCH)) {
-            memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
-            remove_mem_copy_all(ctx);
-        }
-        return;
-    }
-
     nb_oargs = def->nb_oargs;
     for (i = 0; i < nb_oargs; i++) {
         TCGTemp *ts = arg_temp(op->args[i]);
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
     if (i > 0) {
         op->opc = INDEX_op_br;
         op->args[0] = op->args[3];
+        finish_ebb(ctx);
+    } else {
+        finish_bb(ctx);
     }
-    return false;
+    return true;
 }
 
 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
         }
         op->opc = INDEX_op_br;
         op->args[0] = label;
-        break;
+        finish_ebb(ctx);
+        return true;
     }
-    return false;
+
+    finish_bb(ctx);
+    return true;
 }
 
 static bool fold_bswap(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(xor):
             done = fold_xor(&ctx, op);
             break;
+        case INDEX_op_set_label:
+        case INDEX_op_br:
+        case INDEX_op_exit_tb:
+        case INDEX_op_goto_tb:
+        case INDEX_op_goto_ptr:
+            finish_ebb(&ctx);
+            done = true;
+            break;
         default:
             break;
         }
--
2.43.0
There are only a few logical operations which can compute
an "affected" mask. Split out handling of this optimization
to a separate function, only to be called when applicable.

Remove the a_mask field from OptContext, as the mask is
no longer stored anywhere.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
 
     /* In flight values from optimization. */
-    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
     uint64_t s_mask;  /* mask of clrsb(value) bits */
     TCGType type;
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
 
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
-    uint64_t a_mask = ctx->a_mask;
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
 
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
      * type changing opcodes.
      */
     if (ctx->type == TCG_TYPE_I32) {
-        a_mask = (int32_t)a_mask;
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
         ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
+    return false;
+}
+
+/*
+ * An "affected" mask bit is 0 if and only if the result is identical
+ * to the first input.  Thus if the entire mask is 0, the operation
+ * is equivalent to a copy.
+ */
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
+{
+    if (ctx->type == TCG_TYPE_I32) {
+        a_mask = (uint32_t)a_mask;
+    }
     if (a_mask == 0) {
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
      */
-    if (arg_is_const(op->args[2])) {
-        ctx->a_mask = z1 & ~z2;
+    if (arg_is_const(op->args[2]) &&
+        fold_affected_mask(ctx, op, z1 & ~z2)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
      */
     if (arg_is_const(op->args[2])) {
         uint64_t z2 = ~arg_info(op->args[2])->z_mask;
-        ctx->a_mask = z1 & ~z2;
+        if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+            return true;
+        }
         z1 &= z2;
     }
     ctx->z_mask = z1;
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
 
     z_mask_old = arg_info(op->args[1])->z_mask;
     z_mask = extract64(z_mask_old, pos, len);
-    if (pos == 0) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
-    if (!type_change) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;
 
-    if (pos == 0) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
     }
 
     /* Assume all bits affected, no bits known zero, no sign reps. */
-    ctx.a_mask = -1;
     ctx.z_mask = -1;
     ctx.s_mask = 0;
 
--
2.43.0
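For readers following the mask algebra, here is a small host-side sketch (plain C, illustrative
only, not TCG code) of what an "affected" mask expresses for an AND with a constant: if no
possibly-set bit of the first input can be cleared by the constant, the whole operation is a copy.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* z1: known-nonzero mask of the input (a clear bit is known to be 0). */
        uint64_t z1 = 0xff;        /* only the low byte may be set */
        uint64_t c  = 0xffffff;    /* constant second operand of the AND */
        uint64_t a_mask = z1 & ~c; /* bits the AND could actually clear */

        if (a_mask == 0) {
            /* Every possibly-set input bit survives: x & c == x. */
            printf("fold to a copy of the first operand\n");
        } else {
            printf("keep the AND, affected bits 0x%llx\n",
                   (unsigned long long)a_mask);
        }
        return 0;
    }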
Use of fold_masks should be restricted to those opcodes that
can reliably make use of it -- those with a single output,
and from higher-level folders that set up the masks.
Prepare for conversion of each folder in turn.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
+    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    TCGTemp *ts;
+    TempOptInfo *ti;
+
+    /* Only single-output opcodes are supported here. */
+    tcg_debug_assert(def->nb_oargs == 1);
 
     /*
      * 32-bit ops generate 32-bit results, which for the purpose of
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
-        ctx->z_mask = z_mask;
-        ctx->s_mask = s_mask;
     }
 
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
-    return false;
+
+    ts = arg_temp(op->args[0]);
+    reset_ts(ctx, ts);
+
+    ti = ts_info(ts);
+    ti->z_mask = z_mask;
+    ti->s_mask = s_mask;
+    return true;
 }
 
 /*
--
2.43.0
Add a routine to which masks can be passed directly, rather than
storing them into OptContext.  To be used in upcoming patches.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
-static bool fold_masks(OptContext *ctx, TCGOp *op)
+/*
+ * Record "zero" and "sign" masks for the single output of @op.
+ * See TempOptInfo definition of z_mask and s_mask.
+ * If z_mask allows, fold the output to constant zero.
+ */
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
+                          uint64_t z_mask, uint64_t s_mask)
 {
-    uint64_t z_mask = ctx->z_mask;
-    uint64_t s_mask = ctx->s_mask;
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+    return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
+}
+
 /*
  * An "affected" mask bit is 0 if and only if the result is identical
  * to the first input.  Thus if the entire mask is 0, the operation
--
2.43.0
Consider the passed s_mask to be a minimum deduced from
either existing s_mask or from a sign-extension operation.
We may be able to deduce more from the set of known zeros.
Remove identical logic from several opcode folders.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * Record "zero" and "sign" masks for the single output of @op.
  * See TempOptInfo definition of z_mask and s_mask.
  * If z_mask allows, fold the output to constant zero.
+ * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                           uint64_t z_mask, uint64_t s_mask)
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 
     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask;
+    ti->s_mask = s_mask | smask_from_zmask(z_mask);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    s_mask = smask_from_zmask(z_mask);
 
+    s_mask = 0;
     switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
     case TCG_BSWAP_OZ:
         break;
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         /* The high bits are undefined: force all bits above the sign to 1. */
         z_mask |= sign << 1;
-        s_mask = 0;
         break;
     }
     ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
     ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
         return true;
     }
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
 
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
     if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
     int width = 8 * memop_size(mop);
 
     if (width < 64) {
-        ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
-        if (!(mop & MO_SIGN)) {
+        if (mop & MO_SIGN) {
+            ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+        } else {
             ctx->z_mask = MAKE_64BIT_MASK(0, width);
-            ctx->s_mask <<= 1;
         }
     }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
     fold_setcond_tst_pow2(ctx, op, false);
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 
 do_setcond_const:
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
         break;
     CASE_OP_32_64(ld8u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 8);
-        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
         break;
     CASE_OP_32_64(ld16s):
         ctx->s_mask = MAKE_64BIT_MASK(16, 48);
         break;
     CASE_OP_32_64(ld16u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 16);
-        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
        break;
     case INDEX_op_ld32s_i64:
         ctx->s_mask = MAKE_64BIT_MASK(32, 32);
         break;
     case INDEX_op_ld32u_i64:
         ctx->z_mask = MAKE_64BIT_MASK(0, 32);
-        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
         break;
     default:
         g_assert_not_reached();
--
2.43.0
Change the representation from sign bit repetitions to all bits equal
to the sign bit, including the sign bit itself.

The previous format has a problem in that it is difficult to recreate
a valid sign mask after a shift operation: the "repetitions" part of
the previous format meant that applying the same shift as for the value
lead to an off-by-one value.

The new format, including the sign bit itself, means that the sign mask
can be manipulated in exactly the same way as the value, canonicalization
is easier.

Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
to do so.  Treat 0 as a non-canonical but typeless input for no sign
information, which will be reset as appropriate for the data type.
We can easily fold in the data from z_mask while canonicalizing.

Temporarily disable optimizations using s_mask while each operation is
converted to use fold_masks_zs and to the new form.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 64 ++++++++++++--------------------------------------
 1 file changed, 15 insertions(+), 49 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
     uint64_t val;
     uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
-    uint64_t s_mask;  /* a left-aligned mask of clrsb(value) bits. */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
 } TempOptInfo;
 
 typedef struct OptContext {
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
 
     /* In flight values from optimization. */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
-    uint64_t s_mask;  /* mask of clrsb(value) bits */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
     TCGType type;
 } OptContext;
 
-/* Calculate the smask for a specific value. */
-static uint64_t smask_from_value(uint64_t value)
-{
-    int rep = clrsb64(value);
-    return ~(~0ull >> rep);
-}
-
-/*
- * Calculate the smask for a given set of known-zeros.
- * If there are lots of zeros on the left, we can consider the remainder
- * an unsigned field, and thus the corresponding signed field is one bit
- * larger.
- */
-static uint64_t smask_from_zmask(uint64_t zmask)
-{
-    /*
-     * Only the 0 bits are significant for zmask, thus the msb itself
-     * must be zero, else we have no sign information.
-     */
-    int rep = clz64(zmask);
-    if (rep == 0) {
-        return 0;
-    }
-    rep -= 1;
-    return ~(~0ull >> rep);
-}
-
-/*
- * Recreate a properly left-aligned smask after manipulation.
- * Some bit-shuffling, particularly shifts and rotates, may
- * retain sign bits on the left, but may scatter disconnected
- * sign bits on the right.  Retain only what remains to the left.
- */
-static uint64_t smask_from_smask(int64_t smask)
-{
-    /* Only the 1 bits are significant for smask */
-    return smask_from_zmask(~smask);
-}
-
 static inline TempOptInfo *ts_info(TCGTemp *ts)
 {
     return ts->state_ptr;
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
         ti->is_const = true;
         ti->val = ts->val;
         ti->z_mask = ts->val;
-        ti->s_mask = smask_from_value(ts->val);
+        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
     } else {
         ti->is_const = false;
         ti->z_mask = -1;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
          */
         if (i == 0) {
             ts_info(ts)->z_mask = ctx->z_mask;
-            ts_info(ts)->s_mask = ctx->s_mask;
         }
     }
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
-                          uint64_t z_mask, uint64_t s_mask)
+                          uint64_t z_mask, int64_t s_mask)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
+    int rep;
 
     /* Only single-output opcodes are supported here. */
     tcg_debug_assert(def->nb_oargs == 1);
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
      */
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
-        s_mask |= MAKE_64BIT_MASK(32, 32);
+        s_mask |= INT32_MIN;
     }
 
     if (z_mask == 0) {
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 
     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask | smask_from_zmask(z_mask);
+
+    /* Canonicalize s_mask and incorporate data from z_mask. */
+    rep = clz64(~s_mask);
+    rep = MAX(rep, clz64(z_mask));
+    rep = MAX(rep - 1, 0);
+    ti->s_mask = INT64_MIN >> rep;
+
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
        return true;
     }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;
 
-    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
         ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
 
         s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
-        ctx->s_mask = smask_from_smask(s_mask);
 
         return fold_masks(ctx, op);
     }
--
2.43.0
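The new all-bits-equal-to-msb convention can be checked with a short host-side computation.
The sketch below is illustrative only (it assumes a GCC/Clang host so that __builtin_clrsbll
stands in for QEMU's clrsb64 and that signed right shift is arithmetic); it builds the mask the
same way init_ts_info above does, and shows why the mask now shifts exactly like the value,
avoiding the off-by-one described in the commit message:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t val = (int64_t)0xffffffffffff1234ull; /* negative, 48 sign-bit copies */

        /* New format: every bit guaranteed equal to the msb, msb included. */
        int64_t s_mask = INT64_MIN >> __builtin_clrsbll(val);
        printf("val       = 0x%016llx\n", (unsigned long long)val);
        printf("s_mask    = 0x%016llx\n", (unsigned long long)s_mask);

        /* Shifting value and mask by the same amount keeps the mask valid. */
        printf("val>>4    = 0x%016llx\n", (unsigned long long)(val >> 4));
        printf("s_mask>>4 = 0x%016llx\n", (unsigned long long)(s_mask >> 4));
        return 0;
    }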
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void finish_ebb(OptContext *ctx)
     remove_mem_copy_all(ctx);
 }
 
-static void finish_folding(OptContext *ctx, TCGOp *op)
+static bool finish_folding(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     int i, nb_oargs;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
             ts_info(ts)->z_mask = ctx->z_mask;
         }
     }
+    return true;
 }
 
 /*
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 0)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 /* We cannot as yet do_constant_folding with vectors. */
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 0)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
         op->args[4] = arg_new_constant(ctx, bl);
         op->args[5] = arg_new_constant(ctx, bh);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_add2(OptContext *ctx, TCGOp *op)
--
2.43.0
Introduce ti_is_const, ti_const_val, ti_is_const_val.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static inline TempOptInfo *arg_info(TCGArg arg)
     return ts_info(arg_temp(arg));
 }
 
+static inline bool ti_is_const(TempOptInfo *ti)
+{
+    return ti->is_const;
+}
+
+static inline uint64_t ti_const_val(TempOptInfo *ti)
+{
+    return ti->val;
+}
+
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
+{
+    return ti_is_const(ti) && ti_const_val(ti) == val;
+}
+
 static inline bool ts_is_const(TCGTemp *ts)
 {
-    return ts_info(ts)->is_const;
+    return ti_is_const(ts_info(ts));
 }
 
 static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
 {
-    TempOptInfo *ti = ts_info(ts);
-    return ti->is_const && ti->val == val;
+    return ti_is_const_val(ts_info(ts), val);
 }
 
 static inline bool arg_is_const(TCGArg arg)
--
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Sink mask computation below fold_affected_mask early exit.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 30 ++++++++++++++++--------------
8
1 file changed, 16 insertions(+), 14 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_add2(OptContext *ctx, TCGOp *op)
15
16
static bool fold_and(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1, z2;
19
+ uint64_t z1, z2, z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2_commutative(ctx, op) ||
23
fold_xi_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
- z2 = arg_info(op->args[2])->z_mask;
30
- ctx->z_mask = z1 & z2;
31
-
32
- /*
33
- * Sign repetitions are perforce all identical, whether they are 1 or 0.
34
- * Bitwise operations preserve the relative quantity of the repetitions.
35
- */
36
- ctx->s_mask = arg_info(op->args[1])->s_mask
37
- & arg_info(op->args[2])->s_mask;
38
+ t1 = arg_info(op->args[1]);
39
+ t2 = arg_info(op->args[2]);
40
+ z1 = t1->z_mask;
41
+ z2 = t2->z_mask;
42
43
/*
44
* Known-zeros does not imply known-ones. Therefore unless
45
* arg2 is constant, we can't infer affected bits from it.
46
*/
47
- if (arg_is_const(op->args[2]) &&
48
- fold_affected_mask(ctx, op, z1 & ~z2)) {
49
+ if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
50
return true;
51
}
52
53
- return fold_masks(ctx, op);
54
+ z_mask = z1 & z2;
55
+
56
+ /*
57
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
58
+ * Bitwise operations preserve the relative quantity of the repetitions.
59
+ */
60
+ s_mask = t1->s_mask & t2->s_mask;
61
+
62
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
63
}
64
65
static bool fold_andc(OptContext *ctx, TCGOp *op)
66
--
67
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Avoid double inversion of the value of the second const operand.
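[Editorial note, not part of the patch: the removed double inversion is purely algebraic; a disposable check with invented values shows the old and new computations agree.]

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* z1: possibly-one bits of op1; c: the constant op2 (illustrative). */
        uint64_t z1 = 0x00ffff00, c = 0x0000ffff;

        /* Old shape: invert the constant's z_mask, then invert again. */
        uint64_t z2 = ~c;
        uint64_t affected_old = z1 & ~z2;
        uint64_t remaining_old = z1 & z2;

        /* New shape: use the constant value directly. */
        uint64_t affected_new = z1 & c;
        uint64_t remaining_new = z1 & ~c;

        assert(affected_old == affected_new);   /* 0x0000ff00 */
        assert(remaining_old == remaining_new); /* 0x00ff0000 */
        return 0;
    }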
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 21 +++++++++++----------
8
1 file changed, 11 insertions(+), 10 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
15
16
static bool fold_andc(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1;
19
+ uint64_t z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2(ctx, op) ||
23
fold_xx_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ t2 = arg_info(op->args[2]);
31
+ z_mask = t1->z_mask;
32
33
/*
34
* Known-zeros does not imply known-ones. Therefore unless
35
* arg2 is constant, we can't infer anything from it.
36
*/
37
- if (arg_is_const(op->args[2])) {
38
- uint64_t z2 = ~arg_info(op->args[2])->z_mask;
39
- if (fold_affected_mask(ctx, op, z1 & ~z2)) {
40
+ if (ti_is_const(t2)) {
41
+ uint64_t v2 = ti_const_val(t2);
42
+ if (fold_affected_mask(ctx, op, z_mask & v2)) {
43
return true;
44
}
45
- z1 &= z2;
46
+ z_mask &= ~v2;
47
}
48
- ctx->z_mask = z1;
49
50
- ctx->s_mask = arg_info(op->args[1])->s_mask
51
- & arg_info(op->args[2])->s_mask;
52
- return fold_masks(ctx, op);
53
+ s_mask = t1->s_mask & t2->s_mask;
54
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
55
}
56
57
static bool fold_brcond(OptContext *ctx, TCGOp *op)
58
--
59
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Always set s_mask along the BSWAP_OS path, since the result is
3
being explicitly sign-extended.
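[Editorial note, not part of the patch: a throwaway model of why the sign-extending flavour always yields a full sign mask. The helper below is invented for illustration and is not QEMU's bswap implementation.]

    #include <assert.h>
    #include <stdint.h>

    /* Invented stand-in for a 16-bit byte swap whose result is
     * sign-extended into 64 bits (the BSWAP_OS case); two's-complement
     * conversion assumed. */
    static int64_t bswap16_os(uint16_t x)
    {
        uint16_t swapped = (uint16_t)((x << 8) | (x >> 8));
        return (int16_t)swapped;    /* explicit sign extension */
    }

    int main(void)
    {
        /* Bits 15..63 of the result always replicate bit 15, so the
         * sign-repetition mask may cover them even when the sign bit
         * could still turn out to be zero. */
        assert(bswap16_os(0x0080) == -0x8000);  /* sign bit set   */
        assert(bswap16_os(0x8000) == 0x0080);   /* sign bit clear */
        return 0;
    }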
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 21 ++++++++++-----------
9
1 file changed, 10 insertions(+), 11 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
16
static bool fold_bswap(OptContext *ctx, TCGOp *op)
17
{
18
uint64_t z_mask, s_mask, sign;
19
+ TempOptInfo *t1 = arg_info(op->args[1]);
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t = arg_info(op->args[1])->val;
23
-
24
- t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
25
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
26
+ if (ti_is_const(t1)) {
27
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
28
+ do_constant_folding(op->opc, ctx->type,
29
+ ti_const_val(t1),
30
+ op->args[2]));
31
}
32
33
- z_mask = arg_info(op->args[1])->z_mask;
34
-
35
+ z_mask = t1->z_mask;
36
switch (op->opc) {
37
case INDEX_op_bswap16_i32:
38
case INDEX_op_bswap16_i64:
39
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
40
/* If the sign bit may be 1, force all the bits above to 1. */
41
if (z_mask & sign) {
42
z_mask |= sign;
43
- s_mask = sign << 1;
44
}
45
+ /* The value and therefore s_mask is explicitly sign-extended. */
46
+ s_mask = sign;
47
break;
48
default:
49
/* The high bits are undefined: force all bits above the sign to 1. */
50
z_mask |= sign << 1;
51
break;
52
}
53
- ctx->z_mask = z_mask;
54
- ctx->s_mask = s_mask;
55
56
- return fold_masks(ctx, op);
57
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
58
}
59
60
static bool fold_call(OptContext *ctx, TCGOp *op)
61
--
62
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Compute s_mask from the union of the maximum count and the
3
op2 fallback used when op1 is zero.
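[Editorial note, not part of the patch: a rough sketch of where the two contributions come from. The widths and the fallback masks below are invented for illustration.]

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* A 64-bit clz/ctz count is at most 64, so it fits in bits 0..6;
         * every higher bit is known zero, i.e. a repetition of the sign. */
        uint64_t z_count = 64 | 63;         /* 0x7f */
        uint64_t s_count = ~z_count;

        /* If op1 may be zero the result falls back to op2, so op2's
         * masks must be admitted as well (values invented here). */
        uint64_t z2 = 0xffff, s2 = ~(uint64_t)0xffff;

        uint64_t z_mask = z_count | z2;     /* union of possible-one bits  */
        uint64_t s_mask = s_count & s2;     /* intersection of sign ranges */

        assert(z_mask == 0xffff);
        assert(s_mask == ~(uint64_t)0xffff);
        return 0;
    }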
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 15 ++++++++++-----
9
1 file changed, 10 insertions(+), 5 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
16
17
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t z_mask;
20
+ uint64_t z_mask, s_mask;
21
+ TempOptInfo *t1 = arg_info(op->args[1]);
22
+ TempOptInfo *t2 = arg_info(op->args[2]);
23
24
- if (arg_is_const(op->args[1])) {
25
- uint64_t t = arg_info(op->args[1])->val;
26
+ if (ti_is_const(t1)) {
27
+ uint64_t t = ti_const_val(t1);
28
29
if (t != 0) {
30
t = do_constant_folding(op->opc, ctx->type, t, 0);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
32
default:
33
g_assert_not_reached();
34
}
35
- ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
36
- return false;
37
+ s_mask = ~z_mask;
38
+ z_mask |= t2->z_mask;
39
+ s_mask &= t2->s_mask;
40
+
41
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
42
}
43
44
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
New patch
1
Add fold_masks_z as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
15
return true;
16
}
17
18
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, z_mask, 0);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
27
28
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask;
31
+
32
if (fold_const1(ctx, op)) {
33
return true;
34
}
35
36
switch (ctx->type) {
37
case TCG_TYPE_I32:
38
- ctx->z_mask = 32 | 31;
39
+ z_mask = 32 | 31;
40
break;
41
case TCG_TYPE_I64:
42
- ctx->z_mask = 64 | 63;
43
+ z_mask = 64 | 63;
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_z(ctx, op, z_mask);
50
}
51
52
static bool fold_deposit(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
When we fold to and, use fold_and.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 35 +++++++++++++++++------------------
8
1 file changed, 17 insertions(+), 18 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
15
16
static bool fold_deposit(OptContext *ctx, TCGOp *op)
17
{
18
+ TempOptInfo *t1 = arg_info(op->args[1]);
19
+ TempOptInfo *t2 = arg_info(op->args[2]);
20
+ int ofs = op->args[3];
21
+ int len = op->args[4];
22
TCGOpcode and_opc;
23
+ uint64_t z_mask;
24
25
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
26
- uint64_t t1 = arg_info(op->args[1])->val;
27
- uint64_t t2 = arg_info(op->args[2])->val;
28
-
29
- t1 = deposit64(t1, op->args[3], op->args[4], t2);
30
- return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
31
+ if (ti_is_const(t1) && ti_is_const(t2)) {
32
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
33
+ deposit64(ti_const_val(t1), ofs, len,
34
+ ti_const_val(t2)));
35
}
36
37
switch (ctx->type) {
38
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
39
}
40
41
/* Inserting a value into zero at offset 0. */
42
- if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
43
- uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
44
+ if (ti_is_const_val(t1, 0) && ofs == 0) {
45
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
46
47
op->opc = and_opc;
48
op->args[1] = op->args[2];
49
op->args[2] = arg_new_constant(ctx, mask);
50
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
51
- return false;
52
+ return fold_and(ctx, op);
53
}
54
55
/* Inserting zero into a value. */
56
- if (arg_is_const_val(op->args[2], 0)) {
57
- uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
58
+ if (ti_is_const_val(t2, 0)) {
59
+ uint64_t mask = deposit64(-1, ofs, len, 0);
60
61
op->opc = and_opc;
62
op->args[2] = arg_new_constant(ctx, mask);
63
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
64
- return false;
65
+ return fold_and(ctx, op);
66
}
67
68
- ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
69
- op->args[3], op->args[4],
70
- arg_info(op->args[2])->z_mask);
71
- return false;
72
+ z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
73
+ return fold_masks_z(ctx, op, z_mask);
74
}
75
76
static bool fold_divide(OptContext *ctx, TCGOp *op)
77
--
78
2.43.0
New patch
1
The input which overlaps the sign bit of the output can
2
have its input s_mask propagated to the output s_mask.
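[Editorial note, not part of the patch: the two cases can be sanity-checked with a local deposit64 stand-in written only for this note, not the one from QEMU's headers.]

    #include <assert.h>
    #include <stdint.h>

    /* Local stand-in for deposit64(), for illustration only. */
    static uint64_t deposit64(uint64_t dst, int ofs, int len, uint64_t val)
    {
        uint64_t mask = (len == 64 ? ~0ull : (1ull << len) - 1) << ofs;
        return (dst & ~mask) | ((val << ofs) & mask);
    }

    int main(void)
    {
        /* Field reaches the top bit (ofs + len == 64): the result's sign
         * bit and its repetitions come from val, shifted up by ofs. */
        uint64_t r = deposit64(0x0123456789abcdefull, 48, 16,
                               0xffffffffffffff80ull);
        assert((r >> 48) == 0xff80);

        /* Field stays below the top: bits above ofs + len are untouched,
         * so only dst's sign repetitions above that point survive. */
        r = deposit64(0xffffffffffff0000ull, 0, 16, 0x1234);
        assert(r == 0xffffffffffff1234ull);
        return 0;
    }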
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 14 ++++++++++++--
8
1 file changed, 12 insertions(+), 2 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
15
TempOptInfo *t2 = arg_info(op->args[2]);
16
int ofs = op->args[3];
17
int len = op->args[4];
18
+ int width;
19
TCGOpcode and_opc;
20
- uint64_t z_mask;
21
+ uint64_t z_mask, s_mask;
22
23
if (ti_is_const(t1) && ti_is_const(t2)) {
24
return tcg_opt_gen_movi(ctx, op, op->args[0],
25
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
26
switch (ctx->type) {
27
case TCG_TYPE_I32:
28
and_opc = INDEX_op_and_i32;
29
+ width = 32;
30
break;
31
case TCG_TYPE_I64:
32
and_opc = INDEX_op_and_i64;
33
+ width = 64;
34
break;
35
default:
36
g_assert_not_reached();
37
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
38
return fold_and(ctx, op);
39
}
40
41
+ /* The s_mask from the top portion of the deposit is still valid. */
42
+ if (ofs + len == width) {
43
+ s_mask = t2->s_mask << ofs;
44
+ } else {
45
+ s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
46
+ }
47
+
48
z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
49
- return fold_masks_z(ctx, op, z_mask);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_divide(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 4 ++--
5
1 file changed, 2 insertions(+), 2 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
12
t = dup_const(TCGOP_VECE(op), t);
13
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup2(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
21
op->opc = INDEX_op_dup_vec;
22
TCGOP_VECE(op) = MO_32;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
26
}
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
--
30
2.43.0
New patch
1
Add fold_masks_s as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
15
return fold_masks_zs(ctx, op, z_mask, 0);
16
}
17
18
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, -1, s_mask);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t s_mask;
31
+
32
if (fold_const2_commutative(ctx, op) ||
33
fold_xi_to_x(ctx, op, -1) ||
34
fold_xi_to_not(ctx, op, 0)) {
35
return true;
36
}
37
38
- ctx->s_mask = arg_info(op->args[1])->s_mask
39
- & arg_info(op->args[2])->s_mask;
40
- return false;
41
+ s_mask = arg_info(op->args[1])->s_mask
42
+ & arg_info(op->args[2])->s_mask;
43
+ return fold_masks_s(ctx, op, s_mask);
44
}
45
46
static bool fold_extract(OptContext *ctx, TCGOp *op)
47
--
48
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 15 ++++++---------
7
1 file changed, 6 insertions(+), 9 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
14
static bool fold_extract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask_old, z_mask;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = extract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ extract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask_old = arg_info(op->args[1])->z_mask;
33
+ z_mask_old = t1->z_mask;
34
z_mask = extract64(z_mask_old, pos, len);
35
if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
36
return true;
37
}
38
- ctx->z_mask = z_mask;
39
40
- return fold_masks(ctx, op);
41
+ return fold_masks_z(ctx, op, z_mask);
42
}
43
44
static bool fold_extract2(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
12
}
13
return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_exts(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Explicitly sign-extend z_mask instead of doing that manually.
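[Editorial note, not part of the patch: the replacement leans on ordinary C sign extension; a disposable check with invented mask values, assuming the usual two's-complement conversion.]

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t z_mask = 0x5a;             /* bit 7 clear: nothing to extend */
        assert((uint64_t)(int8_t)z_mask == 0x5a);

        z_mask = 0x80;                      /* bit 7 may be set... */
        /* ...so the signed cast replicates it upward, which is exactly
         * what the old "if (z_mask & sign) { z_mask |= sign; }" did. */
        assert((uint64_t)(int8_t)z_mask == 0xffffffffffffff80ull);
        return 0;
    }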
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 29 ++++++++++++-----------------
8
1 file changed, 12 insertions(+), 17 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
15
16
static bool fold_exts(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t s_mask_old, s_mask, z_mask, sign;
19
+ uint64_t s_mask_old, s_mask, z_mask;
20
bool type_change = false;
21
+ TempOptInfo *t1;
22
23
if (fold_const1(ctx, op)) {
24
return true;
25
}
26
27
- z_mask = arg_info(op->args[1])->z_mask;
28
- s_mask = arg_info(op->args[1])->s_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ z_mask = t1->z_mask;
31
+ s_mask = t1->s_mask;
32
s_mask_old = s_mask;
33
34
switch (op->opc) {
35
CASE_OP_32_64(ext8s):
36
- sign = INT8_MIN;
37
- z_mask = (uint8_t)z_mask;
38
+ s_mask |= INT8_MIN;
39
+ z_mask = (int8_t)z_mask;
40
break;
41
CASE_OP_32_64(ext16s):
42
- sign = INT16_MIN;
43
- z_mask = (uint16_t)z_mask;
44
+ s_mask |= INT16_MIN;
45
+ z_mask = (int16_t)z_mask;
46
break;
47
case INDEX_op_ext_i32_i64:
48
type_change = true;
49
QEMU_FALLTHROUGH;
50
case INDEX_op_ext32s_i64:
51
- sign = INT32_MIN;
52
- z_mask = (uint32_t)z_mask;
53
+ s_mask |= INT32_MIN;
54
+ z_mask = (int32_t)z_mask;
55
break;
56
default:
57
g_assert_not_reached();
58
}
59
60
- if (z_mask & sign) {
61
- z_mask |= sign;
62
- }
63
- s_mask |= sign << 1;
64
-
65
- ctx->z_mask = z_mask;
66
- ctx->s_mask = s_mask;
67
if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
68
return true;
69
}
70
71
- return fold_masks(ctx, op);
72
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
73
}
74
75
static bool fold_extu(OptContext *ctx, TCGOp *op)
76
--
77
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 4 ++--
7
1 file changed, 2 insertions(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
14
g_assert_not_reached();
15
}
16
17
- ctx->z_mask = z_mask;
18
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
19
return true;
20
}
21
- return fold_masks(ctx, op);
22
+
23
+ return fold_masks_z(ctx, op, z_mask);
24
}
25
26
static bool fold_mb(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 19 +++++++++++--------
7
1 file changed, 11 insertions(+), 8 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
14
15
static bool fold_movcond(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *tt, *ft;
19
int i;
20
21
/* If true and false values are the same, eliminate the cmp. */
22
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
23
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
24
}
25
26
- ctx->z_mask = arg_info(op->args[3])->z_mask
27
- | arg_info(op->args[4])->z_mask;
28
- ctx->s_mask = arg_info(op->args[3])->s_mask
29
- & arg_info(op->args[4])->s_mask;
30
+ tt = arg_info(op->args[3]);
31
+ ft = arg_info(op->args[4]);
32
+ z_mask = tt->z_mask | ft->z_mask;
33
+ s_mask = tt->s_mask & ft->s_mask;
34
35
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
36
- uint64_t tv = arg_info(op->args[3])->val;
37
- uint64_t fv = arg_info(op->args[4])->val;
38
+ if (ti_is_const(tt) && ti_is_const(ft)) {
39
+ uint64_t tv = ti_const_val(tt);
40
+ uint64_t fv = ti_const_val(ft);
41
TCGOpcode opc, negopc = 0;
42
TCGCond cond = op->args[5];
43
44
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
45
}
46
}
47
}
48
- return false;
49
+
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_mul(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
1
The current implementation is a no-op, simply returning addr.
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
This is incorrect, because we ought to be checking the page
3
permissions for execution.
4
5
Make get_page_addr_code inline for both implementations.
6
7
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
8
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
9
Acked-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
3
---
12
include/exec/exec-all.h | 85 ++++++++++++++---------------------------
4
tcg/optimize.c | 6 +++---
13
accel/tcg/cputlb.c | 5 ---
5
1 file changed, 3 insertions(+), 3 deletions(-)
14
accel/tcg/user-exec.c | 14 +++++++
15
3 files changed, 42 insertions(+), 62 deletions(-)
16
6
17
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
18
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
19
--- a/include/exec/exec-all.h
9
--- a/tcg/optimize.c
20
+++ b/include/exec/exec-all.h
10
+++ b/tcg/optimize.c
21
@@ -XXX,XX +XXX,XX @@ struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
11
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
22
hwaddr index, MemTxAttrs attrs);
12
fold_xi_to_x(ctx, op, 1)) {
23
#endif
13
return true;
24
14
}
25
-#if defined(CONFIG_USER_ONLY)
15
- return false;
26
-void mmap_lock(void);
16
+ return finish_folding(ctx, op);
27
-void mmap_unlock(void);
28
-bool have_mmap_lock(void);
29
-
30
/**
31
- * get_page_addr_code() - user-mode version
32
+ * get_page_addr_code_hostp()
33
* @env: CPUArchState
34
* @addr: guest virtual address of guest code
35
*
36
- * Returns @addr.
37
+ * See get_page_addr_code() (full-system version) for documentation on the
38
+ * return value.
39
+ *
40
+ * Sets *@hostp (when @hostp is non-NULL) as follows.
41
+ * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
42
+ * to the host address where @addr's content is kept.
43
+ *
44
+ * Note: this function can trigger an exception.
45
+ */
46
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
47
+ void **hostp);
48
+
49
+/**
50
+ * get_page_addr_code()
51
+ * @env: CPUArchState
52
+ * @addr: guest virtual address of guest code
53
+ *
54
+ * If we cannot translate and execute from the entire RAM page, or if
55
+ * the region is not backed by RAM, returns -1. Otherwise, returns the
56
+ * ram_addr_t corresponding to the guest code at @addr.
57
+ *
58
+ * Note: this function can trigger an exception.
59
*/
60
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
61
target_ulong addr)
62
{
63
- return addr;
64
+ return get_page_addr_code_hostp(env, addr, NULL);
65
}
17
}
66
18
67
-/**
19
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
68
- * get_page_addr_code_hostp() - user-mode version
20
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
69
- * @env: CPUArchState
21
fold_xi_to_i(ctx, op, 0)) {
70
- * @addr: guest virtual address of guest code
22
return true;
71
- *
23
}
72
- * Returns @addr.
24
- return false;
73
- *
25
+ return finish_folding(ctx, op);
74
- * If @hostp is non-NULL, sets *@hostp to the host address where @addr's content
75
- * is kept.
76
- */
77
-static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
78
- target_ulong addr,
79
- void **hostp)
80
-{
81
- if (hostp) {
82
- *hostp = g2h_untagged(addr);
83
- }
84
- return addr;
85
-}
86
+#if defined(CONFIG_USER_ONLY)
87
+void mmap_lock(void);
88
+void mmap_unlock(void);
89
+bool have_mmap_lock(void);
90
91
/**
92
* adjust_signal_pc:
93
@@ -XXX,XX +XXX,XX @@ G_NORETURN void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
94
static inline void mmap_lock(void) {}
95
static inline void mmap_unlock(void) {}
96
97
-/**
98
- * get_page_addr_code() - full-system version
99
- * @env: CPUArchState
100
- * @addr: guest virtual address of guest code
101
- *
102
- * If we cannot translate and execute from the entire RAM page, or if
103
- * the region is not backed by RAM, returns -1. Otherwise, returns the
104
- * ram_addr_t corresponding to the guest code at @addr.
105
- *
106
- * Note: this function can trigger an exception.
107
- */
108
-tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);
109
-
110
-/**
111
- * get_page_addr_code_hostp() - full-system version
112
- * @env: CPUArchState
113
- * @addr: guest virtual address of guest code
114
- *
115
- * See get_page_addr_code() (full-system version) for documentation on the
116
- * return value.
117
- *
118
- * Sets *@hostp (when @hostp is non-NULL) as follows.
119
- * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
120
- * to the host address where @addr's content is kept.
121
- *
122
- * Note: this function can trigger an exception.
123
- */
124
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
125
- void **hostp);
126
-
127
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
128
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
129
130
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
131
index XXXXXXX..XXXXXXX 100644
132
--- a/accel/tcg/cputlb.c
133
+++ b/accel/tcg/cputlb.c
134
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
135
return qemu_ram_addr_from_host_nofail(p);
136
}
26
}
137
27
138
-tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
28
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
139
-{
29
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
140
- return get_page_addr_code_hostp(env, addr, NULL);
30
tcg_opt_gen_movi(ctx, op2, rh, h);
141
-}
31
return true;
142
-
32
}
143
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
33
- return false;
144
CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
34
+ return finish_folding(ctx, op);
145
{
146
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
147
index XXXXXXX..XXXXXXX 100644
148
--- a/accel/tcg/user-exec.c
149
+++ b/accel/tcg/user-exec.c
150
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
151
return size ? g2h(env_cpu(env), addr) : NULL;
152
}
35
}
153
36
154
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
37
static bool fold_nand(OptContext *ctx, TCGOp *op)
155
+ void **hostp)
156
+{
157
+ int flags;
158
+
159
+ flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
160
+ g_assert(flags == 0);
161
+
162
+ if (hostp) {
163
+ *hostp = g2h_untagged(addr);
164
+ }
165
+ return addr;
166
+}
167
+
168
/* The softmmu versions of these helpers are in cputlb.c. */
169
170
/*
171
--
38
--
172
2.34.1
39
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nand(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, -1)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 9 ++-------
7
1 file changed, 2 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
14
{
15
/* Set to 1 all bits to the left of the rightmost. */
16
uint64_t z_mask = arg_info(op->args[1])->z_mask;
17
- ctx->z_mask = -(z_mask & -z_mask);
18
+ z_mask = -(z_mask & -z_mask);
19
20
- /*
21
- * Because of fold_sub_to_neg, we want to always return true,
22
- * via finish_folding.
23
- */
24
- finish_folding(ctx, op);
25
- return true;
26
+ return fold_masks_z(ctx, op, z_mask);
27
}
28
29
static bool fold_neg(OptContext *ctx, TCGOp *op)
30
--
31
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nor(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, 0)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_not(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 7 +------
7
1 file changed, 1 insertion(+), 6 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
if (fold_const1(ctx, op)) {
15
return true;
16
}
17
-
18
- ctx->s_mask = arg_info(op->args[1])->s_mask;
19
-
20
- /* Because of fold_to_not, we want to always return true, via finish. */
21
- finish_folding(ctx, op);
22
- return true;
23
+ return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
24
}
25
26
static bool fold_or(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 ++++++++-----
7
1 file changed, 8 insertions(+), 5 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
15
static bool fold_or(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *t1, *t2;
19
+
20
if (fold_const2_commutative(ctx, op) ||
21
fold_xi_to_x(ctx, op, 0) ||
22
fold_xx_to_x(ctx, op)) {
23
return true;
24
}
25
26
- ctx->z_mask = arg_info(op->args[1])->z_mask
27
- | arg_info(op->args[2])->z_mask;
28
- ctx->s_mask = arg_info(op->args[1])->s_mask
29
- & arg_info(op->args[2])->s_mask;
30
- return fold_masks(ctx, op);
31
+ t1 = arg_info(op->args[1]);
32
+ t2 = arg_info(op->args[2]);
33
+ z_mask = t1->z_mask | t2->z_mask;
34
+ s_mask = t1->s_mask & t2->s_mask;
35
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
36
}
37
38
static bool fold_orc(OptContext *ctx, TCGOp *op)
39
--
40
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
14
15
static bool fold_orc(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2(ctx, op) ||
20
fold_xx_to_i(ctx, op, -1) ||
21
fold_xi_to_x(ctx, op, -1) ||
22
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
23
return true;
24
}
25
26
- ctx->s_mask = arg_info(op->args[1])->s_mask
27
- & arg_info(op->args[2])->s_mask;
28
- return false;
29
+ s_mask = arg_info(op->args[1])->s_mask
30
+ & arg_info(op->args[2])->s_mask;
31
+ return fold_masks_s(ctx, op, s_mask);
32
}
33
34
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
35
--
36
2.43.0
1
This bit is not saved across interrupts, so we must
1
Avoid the use of the OptContext slots.
2
delay delivering the interrupt until the skip has
3
been processed.
4
2
5
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1118
3
Be careful not to call fold_masks_zs when the memory operation
6
Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
4
is wide enough to require multiple outputs, so split into two
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.
6
7
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
9
---
10
target/avr/helper.c | 9 +++++++++
10
tcg/optimize.c | 26 +++++++++++++++++++++-----
11
target/avr/translate.c | 26 ++++++++++++++++++++++----
11
1 file changed, 21 insertions(+), 5 deletions(-)
12
2 files changed, 31 insertions(+), 4 deletions(-)
13
12
14
diff --git a/target/avr/helper.c b/target/avr/helper.c
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
16
--- a/target/avr/helper.c
15
--- a/tcg/optimize.c
17
+++ b/target/avr/helper.c
16
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
17
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
19
AVRCPU *cpu = AVR_CPU(cs);
18
return fold_masks_s(ctx, op, s_mask);
20
CPUAVRState *env = &cpu->env;
19
}
21
20
22
+ /*
21
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
23
+ * We cannot separate a skip from the next instruction,
22
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
24
+ * as the skip would not be preserved across the interrupt.
23
{
25
+ * Separating the two insn normally only happens at page boundaries.
24
const TCGOpDef *def = &tcg_op_defs[op->opc];
26
+ */
25
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
27
+ if (env->skip) {
26
MemOp mop = get_memop(oi);
28
+ return false;
27
int width = 8 * memop_size(mop);
29
+ }
28
+ uint64_t z_mask = -1, s_mask = 0;
30
+
29
31
if (interrupt_request & CPU_INTERRUPT_RESET) {
30
if (width < 64) {
32
if (cpu_interrupts_enabled(env)) {
31
if (mop & MO_SIGN) {
33
cs->exception_index = EXCP_RESET;
32
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
34
diff --git a/target/avr/translate.c b/target/avr/translate.c
33
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
35
index XXXXXXX..XXXXXXX 100644
34
} else {
36
--- a/target/avr/translate.c
35
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
37
+++ b/target/avr/translate.c
36
+ z_mask = MAKE_64BIT_MASK(0, width);
38
@@ -XXX,XX +XXX,XX @@ static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
39
if (skip_label) {
40
canonicalize_skip(ctx);
41
gen_set_label(skip_label);
42
- if (ctx->base.is_jmp == DISAS_NORETURN) {
43
+
44
+ switch (ctx->base.is_jmp) {
45
+ case DISAS_NORETURN:
46
ctx->base.is_jmp = DISAS_CHAIN;
47
+ break;
48
+ case DISAS_NEXT:
49
+ if (ctx->base.tb->flags & TB_FLAGS_SKIP) {
50
+ ctx->base.is_jmp = DISAS_TOO_MANY;
51
+ }
52
+ break;
53
+ default:
54
+ break;
55
}
37
}
56
}
38
}
57
39
58
@@ -XXX,XX +XXX,XX @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
40
/* Opcodes that touch guest memory stop the mb optimization. */
59
{
41
ctx->prev_mb = NULL;
60
DisasContext *ctx = container_of(dcbase, DisasContext, base);
42
- return false;
61
bool nonconst_skip = canonicalize_skip(ctx);
43
+
62
+ /*
44
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
63
+ * Because we disable interrupts while env->skip is set,
45
+}
64
+ * we must return to the main loop to re-evaluate afterward.
46
+
65
+ */
47
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
66
+ bool force_exit = ctx->base.tb->flags & TB_FLAGS_SKIP;
48
+{
67
49
+ /* Opcodes that touch guest memory stop the mb optimization. */
68
switch (ctx->base.is_jmp) {
50
+ ctx->prev_mb = NULL;
69
case DISAS_NORETURN:
51
+ return finish_folding(ctx, op);
70
@@ -XXX,XX +XXX,XX @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
52
}
71
case DISAS_NEXT:
53
72
case DISAS_TOO_MANY:
54
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
73
case DISAS_CHAIN:
55
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
74
- if (!nonconst_skip) {
75
+ if (!nonconst_skip && !force_exit) {
76
/* Note gen_goto_tb checks singlestep. */
77
gen_goto_tb(ctx, 1, ctx->npc);
78
break;
56
break;
79
@@ -XXX,XX +XXX,XX @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
57
case INDEX_op_qemu_ld_a32_i32:
80
tcg_gen_movi_tl(cpu_pc, ctx->npc);
58
case INDEX_op_qemu_ld_a64_i32:
81
/* fall through */
59
+ done = fold_qemu_ld_1reg(&ctx, op);
82
case DISAS_LOOKUP:
83
- tcg_gen_lookup_and_goto_ptr();
84
- break;
85
+ if (!force_exit) {
86
+ tcg_gen_lookup_and_goto_ptr();
87
+ break;
60
+ break;
88
+ }
61
case INDEX_op_qemu_ld_a32_i64:
89
+ /* fall through */
62
case INDEX_op_qemu_ld_a64_i64:
90
case DISAS_EXIT:
63
+ if (TCG_TARGET_REG_BITS == 64) {
91
tcg_gen_exit_tb(NULL, 0);
64
+ done = fold_qemu_ld_1reg(&ctx, op);
92
break;
65
+ break;
66
+ }
67
+ QEMU_FALLTHROUGH;
68
case INDEX_op_qemu_ld_a32_i128:
69
case INDEX_op_qemu_ld_a64_i128:
70
- done = fold_qemu_ld(&ctx, op);
71
+ done = fold_qemu_ld_2reg(&ctx, op);
72
break;
73
case INDEX_op_qemu_st8_a32_i32:
74
case INDEX_op_qemu_st8_a64_i32:
93
--
75
--
94
2.34.1
76
2.43.0
95
96
1
From: Ilya Leoshkevich <iii@linux.ibm.com>
1
Stores have no output operands, and so need no further work.
2
2
3
Right now the translator stops right *after* the end of a page, which
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
breaks reporting of fault locations when the last instruction of a
5
multi-insn translation block crosses a page boundary.
6
7
An implementation, like the one arm and s390x have, would require an
8
i386 length disassembler, which is burdensome to maintain. Another
9
alternative would be to single-step at the end of a guest page, but
10
this may come with a performance impact.
11
12
Fix by snapshotting disassembly state and restoring it after we figure
13
out we crossed a page boundary. This includes rolling back cc_op
14
updates and emitted ops.
15
16
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
17
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
18
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1143
19
Message-Id: <20220817150506.592862-4-iii@linux.ibm.com>
20
[rth: Simplify end-of-insn cross-page checks.]
21
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
22
---
5
---
23
target/i386/tcg/translate.c | 64 ++++++++++++++++-----------
6
tcg/optimize.c | 11 +++++------
24
tests/tcg/x86_64/noexec.c | 75 ++++++++++++++++++++++++++++++++
7
1 file changed, 5 insertions(+), 6 deletions(-)
25
tests/tcg/x86_64/Makefile.target | 3 +-
26
3 files changed, 116 insertions(+), 26 deletions(-)
27
create mode 100644 tests/tcg/x86_64/noexec.c
28
8
29
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
30
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
31
--- a/target/i386/tcg/translate.c
11
--- a/tcg/optimize.c
32
+++ b/target/i386/tcg/translate.c
12
+++ b/tcg/optimize.c
33
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
13
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
34
TCGv_i64 tmp1_i64;
35
36
sigjmp_buf jmpbuf;
37
+ TCGOp *prev_insn_end;
38
} DisasContext;
39
40
/* The environment in which user-only runs is constrained. */
41
@@ -XXX,XX +XXX,XX @@ static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
42
{
14
{
43
uint64_t pc = s->pc;
15
/* Opcodes that touch guest memory stop the mb optimization. */
44
16
ctx->prev_mb = NULL;
45
+ /* This is a subsequent insn that crosses a page boundary. */
17
- return false;
46
+ if (s->base.num_insns > 1 &&
18
+ return true;
47
+ !is_same_page(&s->base, s->pc + num_bytes - 1)) {
19
}
48
+ siglongjmp(s->jmpbuf, 2);
20
49
+ }
21
static bool fold_remainder(OptContext *ctx, TCGOp *op)
50
+
22
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
51
s->pc += num_bytes;
23
52
if (unlikely(s->pc - s->pc_start > X86_MAX_INSN_LENGTH)) {
24
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
53
/* If the instruction's 16th byte is on a different page than the 1st, a
25
remove_mem_copy_all(ctx);
54
@@ -XXX,XX +XXX,XX @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
26
- return false;
55
int modrm, reg, rm, mod, op, opreg, val;
27
+ return true;
56
target_ulong next_eip, tval;
57
target_ulong pc_start = s->base.pc_next;
58
+ bool orig_cc_op_dirty = s->cc_op_dirty;
59
+ CCOp orig_cc_op = s->cc_op;
60
61
s->pc_start = s->pc = pc_start;
62
s->override = -1;
63
@@ -XXX,XX +XXX,XX @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
64
s->rip_offset = 0; /* for relative ip address */
65
s->vex_l = 0;
66
s->vex_v = 0;
67
- if (sigsetjmp(s->jmpbuf, 0) != 0) {
68
+ switch (sigsetjmp(s->jmpbuf, 0)) {
69
+ case 0:
70
+ break;
71
+ case 1:
72
gen_exception_gpf(s);
73
return s->pc;
74
+ case 2:
75
+ /* Restore state that may affect the next instruction. */
76
+ s->cc_op_dirty = orig_cc_op_dirty;
77
+ s->cc_op = orig_cc_op;
78
+ s->base.num_insns--;
79
+ tcg_remove_ops_after(s->prev_insn_end);
80
+ s->base.is_jmp = DISAS_TOO_MANY;
81
+ return pc_start;
82
+ default:
83
+ g_assert_not_reached();
84
}
28
}
85
29
86
prefixes = 0;
30
switch (op->opc) {
87
@@ -XXX,XX +XXX,XX @@ static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
31
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
88
{
32
g_assert_not_reached();
89
DisasContext *dc = container_of(dcbase, DisasContext, base);
33
}
90
34
remove_mem_copy_in(ctx, ofs, ofs + lm1);
91
+ dc->prev_insn_end = tcg_last_op();
35
- return false;
92
tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
36
+ return true;
93
}
37
}
94
38
95
@@ -XXX,XX +XXX,XX @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
39
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
96
#endif
40
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
97
41
TCGType type;
98
pc_next = disas_insn(dc, cpu);
42
99
-
43
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
100
- if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
44
- fold_tcg_st(ctx, op);
101
- /* if single step mode, we generate only one instruction and
45
- return false;
102
- generate an exception */
46
+ return fold_tcg_st(ctx, op);
103
- /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
47
}
104
- the flag and abort the translation to give the irqs a
48
105
- chance to happen */
49
src = arg_temp(op->args[0]);
106
- dc->base.is_jmp = DISAS_TOO_MANY;
50
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
107
- } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
51
last = ofs + tcg_type_size(type) - 1;
108
- && ((pc_next & TARGET_PAGE_MASK)
52
remove_mem_copy_in(ctx, ofs, last);
109
- != ((pc_next + TARGET_MAX_INSN_SIZE - 1)
53
record_mem_copy(ctx, type, src, ofs, last);
110
- & TARGET_PAGE_MASK)
54
- return false;
111
- || (pc_next & ~TARGET_PAGE_MASK) == 0)) {
55
+ return true;
112
- /* Do not cross the boundary of the pages in icount mode,
113
- it can cause an exception. Do it only when boundary is
114
- crossed by the first instruction in the block.
115
- If current instruction already crossed the bound - it's ok,
116
- because an exception hasn't stopped this code.
117
- */
118
- dc->base.is_jmp = DISAS_TOO_MANY;
119
- } else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) {
120
- dc->base.is_jmp = DISAS_TOO_MANY;
121
- }
122
-
123
dc->base.pc_next = pc_next;
124
+
125
+ if (dc->base.is_jmp == DISAS_NEXT) {
126
+ if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
127
+ /*
128
+ * If single step mode, we generate only one instruction and
129
+ * generate an exception.
130
+ * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
131
+ * the flag and abort the translation to give the irqs a
132
+ * chance to happen.
133
+ */
134
+ dc->base.is_jmp = DISAS_TOO_MANY;
135
+ } else if (!is_same_page(&dc->base, pc_next)) {
136
+ dc->base.is_jmp = DISAS_TOO_MANY;
137
+ }
138
+ }
139
}
56
}
140
57
141
static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
58
static bool fold_xor(OptContext *ctx, TCGOp *op)
142
diff --git a/tests/tcg/x86_64/noexec.c b/tests/tcg/x86_64/noexec.c
143
new file mode 100644
144
index XXXXXXX..XXXXXXX
145
--- /dev/null
146
+++ b/tests/tcg/x86_64/noexec.c
147
@@ -XXX,XX +XXX,XX @@
148
+#include "../multiarch/noexec.c.inc"
149
+
150
+static void *arch_mcontext_pc(const mcontext_t *ctx)
151
+{
152
+ return (void *)ctx->gregs[REG_RIP];
153
+}
154
+
155
+int arch_mcontext_arg(const mcontext_t *ctx)
156
+{
157
+ return ctx->gregs[REG_RDI];
158
+}
159
+
160
+static void arch_flush(void *p, int len)
161
+{
162
+}
163
+
164
+extern char noexec_1[];
165
+extern char noexec_2[];
166
+extern char noexec_end[];
167
+
168
+asm("noexec_1:\n"
169
+ " movq $1,%rdi\n" /* %rdi is 0 on entry, set 1. */
170
+ "noexec_2:\n"
171
+ " movq $2,%rdi\n" /* %rdi is 0/1; set 2. */
172
+ " ret\n"
173
+ "noexec_end:");
174
+
175
+int main(void)
176
+{
177
+ struct noexec_test noexec_tests[] = {
178
+ {
179
+ .name = "fallthrough",
180
+ .test_code = noexec_1,
181
+ .test_len = noexec_end - noexec_1,
182
+ .page_ofs = noexec_1 - noexec_2,
183
+ .entry_ofs = noexec_1 - noexec_2,
184
+ .expected_si_ofs = 0,
185
+ .expected_pc_ofs = 0,
186
+ .expected_arg = 1,
187
+ },
188
+ {
189
+ .name = "jump",
190
+ .test_code = noexec_1,
191
+ .test_len = noexec_end - noexec_1,
192
+ .page_ofs = noexec_1 - noexec_2,
193
+ .entry_ofs = 0,
194
+ .expected_si_ofs = 0,
195
+ .expected_pc_ofs = 0,
196
+ .expected_arg = 0,
197
+ },
198
+ {
199
+ .name = "fallthrough [cross]",
200
+ .test_code = noexec_1,
201
+ .test_len = noexec_end - noexec_1,
202
+ .page_ofs = noexec_1 - noexec_2 - 2,
203
+ .entry_ofs = noexec_1 - noexec_2 - 2,
204
+ .expected_si_ofs = 0,
205
+ .expected_pc_ofs = -2,
206
+ .expected_arg = 1,
207
+ },
208
+ {
209
+ .name = "jump [cross]",
210
+ .test_code = noexec_1,
211
+ .test_len = noexec_end - noexec_1,
212
+ .page_ofs = noexec_1 - noexec_2 - 2,
213
+ .entry_ofs = -2,
214
+ .expected_si_ofs = 0,
215
+ .expected_pc_ofs = -2,
216
+ .expected_arg = 0,
217
+ },
218
+ };
219
+
220
+ return test_noexec(noexec_tests,
221
+ sizeof(noexec_tests) / sizeof(noexec_tests[0]));
222
+}
223
diff --git a/tests/tcg/x86_64/Makefile.target b/tests/tcg/x86_64/Makefile.target
224
index XXXXXXX..XXXXXXX 100644
225
--- a/tests/tcg/x86_64/Makefile.target
226
+++ b/tests/tcg/x86_64/Makefile.target
227
@@ -XXX,XX +XXX,XX @@ include $(SRC_PATH)/tests/tcg/i386/Makefile.target
228
229
ifeq ($(filter %-linux-user, $(TARGET)),$(TARGET))
230
X86_64_TESTS += vsyscall
231
+X86_64_TESTS += noexec
232
TESTS=$(MULTIARCH_TESTS) $(X86_64_TESTS) test-x86_64
233
else
234
TESTS=$(MULTIARCH_TESTS)
235
@@ -XXX,XX +XXX,XX @@ test-x86_64: LDFLAGS+=-lm -lc
236
test-x86_64: test-i386.c test-i386.h test-i386-shift.h test-i386-muldiv.h
237
    $(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
238
239
-vsyscall: $(SRC_PATH)/tests/tcg/x86_64/vsyscall.c
240
+%: $(SRC_PATH)/tests/tcg/x86_64/%.c
241
    $(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
242
--
59
--
243
2.34.1
60
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
12
fold_xx_to_i(ctx, op, 0)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
20
--
21
2.43.0
New patch
1
Change return from bool to int; distinguish between
2
complete folding, simplification, and no change.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 22 ++++++++++++++--------
8
1 file changed, 14 insertions(+), 8 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
15
return finish_folding(ctx, op);
16
}
17
18
-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
19
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
20
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
21
{
22
uint64_t a_zmask, b_val;
23
TCGCond cond;
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
25
op->opc = xor_opc;
26
op->args[2] = arg_new_constant(ctx, 1);
27
}
28
- return false;
29
+ return -1;
30
}
31
}
32
-
33
- return false;
34
+ return 0;
35
}
36
37
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
39
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
40
}
41
42
- if (fold_setcond_zmask(ctx, op, false)) {
43
+ i = fold_setcond_zmask(ctx, op, false);
44
+ if (i > 0) {
45
return true;
46
}
47
- fold_setcond_tst_pow2(ctx, op, false);
48
+ if (i == 0) {
49
+ fold_setcond_tst_pow2(ctx, op, false);
50
+ }
51
52
ctx->z_mask = 1;
53
return false;
54
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
55
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
56
}
57
58
- if (fold_setcond_zmask(ctx, op, true)) {
59
+ i = fold_setcond_zmask(ctx, op, true);
60
+ if (i > 0) {
61
return true;
62
}
63
- fold_setcond_tst_pow2(ctx, op, true);
64
+ if (i == 0) {
65
+ fold_setcond_tst_pow2(ctx, op, true);
66
+ }
67
68
/* Value is {0,-1} so all bits are repetitions of the sign. */
69
ctx->s_mask = -1;
70
--
71
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
14
fold_setcond_tst_pow2(ctx, op, false);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
}
21
22
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
14
}
15
16
/* Value is {0,-1} so all bits are repetitions of the sign. */
17
- ctx->s_mask = -1;
18
- return false;
19
+ return fold_masks_s(ctx, op, -1);
20
}
21
22
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
14
return fold_setcond(ctx, op);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
21
do_setcond_const:
22
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
23
--
24
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
13
op->args[3] = tcg_swap_cond(op->args[3]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
13
op->args[5] = tcg_invert_cond(op->args[5]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_sextract(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 24 +++++++++---------------
7
1 file changed, 9 insertions(+), 15 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
14
static bool fold_sextract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask, s_mask, s_mask_old;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = sextract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ sextract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask = arg_info(op->args[1])->z_mask;
33
- z_mask = sextract64(z_mask, pos, len);
34
- ctx->z_mask = z_mask;
35
-
36
- s_mask_old = arg_info(op->args[1])->s_mask;
37
- s_mask = sextract64(s_mask_old, pos, len);
38
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
39
- ctx->s_mask = s_mask;
40
+ s_mask_old = t1->s_mask;
41
+ s_mask = s_mask_old >> pos;
42
+ s_mask |= -1ull << (len - 1);
43
44
if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
45
return true;
46
}
47
48
- return fold_masks(ctx, op);
49
+ z_mask = sextract64(t1->z_mask, pos, len);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_shift(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 27 ++++++++++++++-------------
7
1 file changed, 14 insertions(+), 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
14
static bool fold_shift(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t s_mask, z_mask, sign;
17
+ TempOptInfo *t1, *t2;
18
19
if (fold_const2(ctx, op) ||
20
fold_ix_to_i(ctx, op, 0) ||
21
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
22
return true;
23
}
24
25
- s_mask = arg_info(op->args[1])->s_mask;
26
- z_mask = arg_info(op->args[1])->z_mask;
27
+ t1 = arg_info(op->args[1]);
28
+ t2 = arg_info(op->args[2]);
29
+ s_mask = t1->s_mask;
30
+ z_mask = t1->z_mask;
31
32
- if (arg_is_const(op->args[2])) {
33
- int sh = arg_info(op->args[2])->val;
34
-
35
- ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
36
+ if (ti_is_const(t2)) {
37
+ int sh = ti_const_val(t2);
38
39
+ z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
40
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
41
42
- return fold_masks(ctx, op);
43
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
44
}
45
46
switch (op->opc) {
47
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
48
* Arithmetic right shift will not reduce the number of
49
* input sign repetitions.
50
*/
51
- ctx->s_mask = s_mask;
52
- break;
53
+ return fold_masks_s(ctx, op, s_mask);
54
CASE_OP_32_64(shr):
55
/*
56
* If the sign bit is known zero, then logical right shift
57
- * will not reduced the number of input sign repetitions.
58
+ * will not reduce the number of input sign repetitions.
59
*/
60
- sign = (s_mask & -s_mask) >> 1;
61
+ sign = -s_mask;
62
if (sign && !(z_mask & sign)) {
63
- ctx->s_mask = s_mask;
64
+ return fold_masks_s(ctx, op, s_mask);
65
}
66
break;
67
default:
68
break;
69
}
70
71
- return false;
72
+ return finish_folding(ctx, op);
73
}
74
75
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
76
--
77
2.43.0
diff view generated by jsdifflib
New patch
1
Merge the two conditions, sign != 0 && !(z_mask & sign),
2
by testing ~z_mask & sign. If sign == 0, the bitwise and
3
produces zero, so the test is false.
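Assuming s_mask keeps the shape of a sign-extension mask, so that -s_mask (the "sign" above) is either 0 or a single bit, the two forms are exhaustively equivalent. A quick standalone check, independent of the QEMU sources:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        for (int bit = -1; bit < 8; bit++) {
            uint64_t sign = bit < 0 ? 0 : UINT64_C(1) << bit;

            for (uint64_t z_mask = 0; z_mask < 256; z_mask++) {
                int old_test = sign != 0 && !(z_mask & sign);   /* two conditions */
                int new_test = (~z_mask & sign) != 0;           /* merged test */

                assert(old_test == new_test);
            }
        }
        return 0;
    }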
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 5 ++---
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
16
17
static bool fold_shift(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t s_mask, z_mask, sign;
20
+ uint64_t s_mask, z_mask;
21
TempOptInfo *t1, *t2;
22
23
if (fold_const2(ctx, op) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
25
* If the sign bit is known zero, then logical right shift
26
* will not reduce the number of input sign repetitions.
27
*/
28
- sign = -s_mask;
29
- if (sign && !(z_mask & sign)) {
30
+ if (~z_mask & -s_mask) {
31
return fold_masks_s(ctx, op, s_mask);
32
}
33
break;
34
--
35
2.43.0
diff view generated by jsdifflib
New patch
1
Duplicate fold_sub_vec into fold_sub instead of calling it,
2
now that fold_sub_vec always returns true.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 9 ++++++---
8
1 file changed, 6 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
15
fold_sub_to_neg(ctx, op)) {
16
return true;
17
}
18
- return false;
19
+ return finish_folding(ctx, op);
20
}
21
22
static bool fold_sub(OptContext *ctx, TCGOp *op)
23
{
24
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
25
+ if (fold_const2(ctx, op) ||
26
+ fold_xx_to_i(ctx, op, 0) ||
27
+ fold_xi_to_x(ctx, op, 0) ||
28
+ fold_sub_to_neg(ctx, op)) {
29
return true;
30
}
31
32
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
33
? INDEX_op_add_i32 : INDEX_op_add_i64);
34
op->args[2] = arg_new_constant(ctx, -val);
35
}
36
- return false;
37
+ return finish_folding(ctx, op);
38
}
39
40
static bool fold_sub2(OptContext *ctx, TCGOp *op)
41
--
42
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 16 +++++++++-------
7
1 file changed, 9 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask = -1, s_mask = 0;
18
+
19
/* We can't do any folding with a load, but we can record bits. */
20
switch (op->opc) {
21
CASE_OP_32_64(ld8s):
22
- ctx->s_mask = MAKE_64BIT_MASK(8, 56);
23
+ s_mask = INT8_MIN;
24
break;
25
CASE_OP_32_64(ld8u):
26
- ctx->z_mask = MAKE_64BIT_MASK(0, 8);
27
+ z_mask = MAKE_64BIT_MASK(0, 8);
28
break;
29
CASE_OP_32_64(ld16s):
30
- ctx->s_mask = MAKE_64BIT_MASK(16, 48);
31
+ s_mask = INT16_MIN;
32
break;
33
CASE_OP_32_64(ld16u):
34
- ctx->z_mask = MAKE_64BIT_MASK(0, 16);
35
+ z_mask = MAKE_64BIT_MASK(0, 16);
36
break;
37
case INDEX_op_ld32s_i64:
38
- ctx->s_mask = MAKE_64BIT_MASK(32, 32);
39
+ s_mask = INT32_MIN;
40
break;
41
case INDEX_op_ld32u_i64:
42
- ctx->z_mask = MAKE_64BIT_MASK(0, 32);
43
+ z_mask = MAKE_64BIT_MASK(0, 32);
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
51
52
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
12
TCGType type;
13
14
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
type = ctx->type;
20
--
21
2.43.0
diff view generated by jsdifflib
1
The base qemu_ram_addr_from_host function is already in
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
softmmu/physmem.c; move the nofail version to be adjacent.
2
Remove fold_masks as the function becomes unused.
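The combination rules the rewritten fold_xor passes to fold_masks_zs in the hunk below are unchanged: with z_mask meaning "a mask bit is 0 only where the value bit is known to be 0", a bit of x ^ y can be set only where at least one input bit can be, hence z_mask = t1->z_mask | t2->z_mask; by the analogous argument for sign-repetition bits, s_mask = t1->s_mask & t2->s_mask. A small standalone check of the z_mask inclusion, taking the tightest masks z1 == x and z2 == y (widening the masks only preserves the inclusion):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        for (uint32_t x = 0; x < 256; x++) {
            for (uint32_t y = 0; y < 256; y++) {
                /* Every bit that can be set in x ^ y lies in x | y. */
                assert(((x ^ y) & ~(x | y)) == 0);
            }
        }
        return 0;
    }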
3
3
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
6
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
6
---
9
include/exec/cpu-common.h | 1 +
7
tcg/optimize.c | 18 ++++++++----------
10
accel/tcg/cputlb.c | 12 ------------
8
1 file changed, 8 insertions(+), 10 deletions(-)
11
softmmu/physmem.c | 12 ++++++++++++
12
3 files changed, 13 insertions(+), 12 deletions(-)
13
9
14
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/cpu-common.h
12
--- a/tcg/optimize.c
17
+++ b/include/exec/cpu-common.h
13
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ typedef uintptr_t ram_addr_t;
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
19
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
15
return fold_masks_zs(ctx, op, -1, s_mask);
20
/* This should not be used by devices. */
21
ram_addr_t qemu_ram_addr_from_host(void *ptr);
22
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
23
RAMBlock *qemu_ram_block_by_name(const char *name);
24
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
25
ram_addr_t *offset);
26
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/accel/tcg/cputlb.c
29
+++ b/accel/tcg/cputlb.c
30
@@ -XXX,XX +XXX,XX @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
31
prot, mmu_idx, size);
32
}
16
}
33
17
34
-static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
35
-{
19
-{
36
- ram_addr_t ram_addr;
20
- return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
37
-
38
- ram_addr = qemu_ram_addr_from_host(ptr);
39
- if (ram_addr == RAM_ADDR_INVALID) {
40
- error_report("Bad ram pointer %p", ptr);
41
- abort();
42
- }
43
- return ram_addr;
44
-}
21
-}
45
-
22
-
46
/*
23
/*
47
* Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
24
* An "affected" mask bit is 0 if and only if the result is identical
48
* caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
25
* to the first input. Thus if the entire mask is 0, the operation
49
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
26
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
50
index XXXXXXX..XXXXXXX 100644
27
51
--- a/softmmu/physmem.c
28
static bool fold_xor(OptContext *ctx, TCGOp *op)
52
+++ b/softmmu/physmem.c
29
{
53
@@ -XXX,XX +XXX,XX @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
30
+ uint64_t z_mask, s_mask;
54
return block->offset + offset;
31
+ TempOptInfo *t1, *t2;
32
+
33
if (fold_const2_commutative(ctx, op) ||
34
fold_xx_to_i(ctx, op, 0) ||
35
fold_xi_to_x(ctx, op, 0) ||
36
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
37
return true;
38
}
39
40
- ctx->z_mask = arg_info(op->args[1])->z_mask
41
- | arg_info(op->args[2])->z_mask;
42
- ctx->s_mask = arg_info(op->args[1])->s_mask
43
- & arg_info(op->args[2])->s_mask;
44
- return fold_masks(ctx, op);
45
+ t1 = arg_info(op->args[1]);
46
+ t2 = arg_info(op->args[2]);
47
+ z_mask = t1->z_mask | t2->z_mask;
48
+ s_mask = t1->s_mask & t2->s_mask;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
55
}
50
}
56
51
57
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
52
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
58
+{
59
+ ram_addr_t ram_addr;
60
+
61
+ ram_addr = qemu_ram_addr_from_host(ptr);
62
+ if (ram_addr == RAM_ADDR_INVALID) {
63
+ error_report("Bad ram pointer %p", ptr);
64
+ abort();
65
+ }
66
+ return ram_addr;
67
+}
68
+
69
static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
70
MemTxAttrs attrs, void *buf, hwaddr len);
71
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
72
--
53
--
73
2.34.1
54
2.43.0
diff view generated by jsdifflib
1
From: Ilya Leoshkevich <iii@linux.ibm.com>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
3
Currently it's possible to execute pages that do not have PAGE_EXEC
4
if there is an existing translation block. Fix by invalidating TBs
5
that touch the affected pages.
6
7
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
8
Message-Id: <20220817150506.592862-2-iii@linux.ibm.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
3
---
11
linux-user/mmap.c | 6 ++++--
4
tcg/optimize.c | 2 +-
12
1 file changed, 4 insertions(+), 2 deletions(-)
5
1 file changed, 1 insertion(+), 1 deletion(-)
13
6
14
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
16
--- a/linux-user/mmap.c
9
--- a/tcg/optimize.c
17
+++ b/linux-user/mmap.c
10
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
11
@@ -XXX,XX +XXX,XX @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
19
goto error;
12
return fold_orc(ctx, op);
20
}
13
}
21
}
14
}
22
+
15
- return false;
23
page_set_flags(start, start + len, page_flags);
16
+ return finish_folding(ctx, op);
24
- mmap_unlock();
17
}
25
- return 0;
18
26
+ tb_invalidate_phys_range(start, start + len);
19
/* Propagate constants and copies, fold constant expressions. */
27
+ ret = 0;
28
+
29
error:
30
mmap_unlock();
31
return ret;
32
--
20
--
33
2.34.1
21
2.43.0
diff view generated by jsdifflib
New patch
1
All non-default cases now finish folding within each function.
2
Do the same with the default case and assert it is done after.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 6 ++----
8
1 file changed, 2 insertions(+), 4 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
15
done = true;
16
break;
17
default:
18
+ done = finish_folding(&ctx, op);
19
break;
20
}
21
-
22
- if (!done) {
23
- finish_folding(&ctx, op);
24
- }
25
+ tcg_debug_assert(done);
26
}
27
}
28
--
29
2.43.0
diff view generated by jsdifflib
New patch
1
All mask setting is now done with parameters via fold_masks_*.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 -------------
7
1 file changed, 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
14
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
15
16
/* In flight values from optimization. */
17
- uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
18
- uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
19
TCGType type;
20
} OptContext;
21
22
@@ -XXX,XX +XXX,XX @@ static bool finish_folding(OptContext *ctx, TCGOp *op)
23
for (i = 0; i < nb_oargs; i++) {
24
TCGTemp *ts = arg_temp(op->args[i]);
25
reset_ts(ctx, ts);
26
- /*
27
- * Save the corresponding known-zero/sign bits mask for the
28
- * first output argument (only one supported so far).
29
- */
30
- if (i == 0) {
31
- ts_info(ts)->z_mask = ctx->z_mask;
32
- }
33
}
34
return true;
35
}
36
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
37
ctx.type = TCG_TYPE_I32;
38
}
39
40
- /* Assume all bits affected, no bits known zero, no sign reps. */
41
- ctx.z_mask = -1;
42
- ctx.s_mask = 0;
43
-
44
/*
45
* Process each opcode.
46
* Sorted alphabetically by opcode as much as possible.
47
--
48
2.43.0
diff view generated by jsdifflib
1
Map the stack executable if required by default or on demand.
1
All instances of s_mask have been converted to the new
2
representation. We can now re-enable usage.
2
3
3
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
include/elf.h | 1 +
7
tcg/optimize.c | 4 ++--
8
linux-user/qemu.h | 1 +
8
1 file changed, 2 insertions(+), 2 deletions(-)
9
linux-user/elfload.c | 19 ++++++++++++++++++-
10
3 files changed, 20 insertions(+), 1 deletion(-)
11
9
12
diff --git a/include/elf.h b/include/elf.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
14
--- a/include/elf.h
12
--- a/tcg/optimize.c
15
+++ b/include/elf.h
13
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ typedef int64_t Elf64_Sxword;
14
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
17
#define PT_LOPROC 0x70000000
15
g_assert_not_reached();
18
#define PT_HIPROC 0x7fffffff
19
20
+#define PT_GNU_STACK (PT_LOOS + 0x474e551)
21
#define PT_GNU_PROPERTY (PT_LOOS + 0x474e553)
22
23
#define PT_MIPS_REGINFO 0x70000000
24
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
25
index XXXXXXX..XXXXXXX 100644
26
--- a/linux-user/qemu.h
27
+++ b/linux-user/qemu.h
28
@@ -XXX,XX +XXX,XX @@ struct image_info {
29
uint32_t elf_flags;
30
int personality;
31
abi_ulong alignment;
32
+ bool exec_stack;
33
34
/* Generic semihosting knows about these pointers. */
35
abi_ulong arg_strings; /* strings for argv */
36
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/linux-user/elfload.c
39
+++ b/linux-user/elfload.c
40
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
41
#define ELF_ARCH EM_386
42
43
#define ELF_PLATFORM get_elf_platform()
44
+#define EXSTACK_DEFAULT true
45
46
static const char *get_elf_platform(void)
47
{
48
@@ -XXX,XX +XXX,XX @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
49
50
#define ELF_ARCH EM_ARM
51
#define ELF_CLASS ELFCLASS32
52
+#define EXSTACK_DEFAULT true
53
54
static inline void init_thread(struct target_pt_regs *regs,
55
struct image_info *infop)
56
@@ -XXX,XX +XXX,XX @@ static inline void init_thread(struct target_pt_regs *regs,
57
#else
58
59
#define ELF_CLASS ELFCLASS32
60
+#define EXSTACK_DEFAULT true
61
62
#endif
63
64
@@ -XXX,XX +XXX,XX @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en
65
66
#define ELF_CLASS ELFCLASS64
67
#define ELF_ARCH EM_LOONGARCH
68
+#define EXSTACK_DEFAULT true
69
70
#define elf_check_arch(x) ((x) == EM_LOONGARCH)
71
72
@@ -XXX,XX +XXX,XX @@ static uint32_t get_elf_hwcap(void)
73
#define ELF_CLASS ELFCLASS32
74
#endif
75
#define ELF_ARCH EM_MIPS
76
+#define EXSTACK_DEFAULT true
77
78
#ifdef TARGET_ABI_MIPSN32
79
#define elf_check_abi(x) ((x) & EF_MIPS_ABI2)
80
@@ -XXX,XX +XXX,XX @@ static inline void init_thread(struct target_pt_regs *regs,
81
#define bswaptls(ptr) bswap32s(ptr)
82
#endif
83
84
+#ifndef EXSTACK_DEFAULT
85
+#define EXSTACK_DEFAULT false
86
+#endif
87
+
88
#include "elf.h"
89
90
/* We must delay the following stanzas until after "elf.h". */
91
@@ -XXX,XX +XXX,XX @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
92
struct image_info *info)
93
{
94
abi_ulong size, error, guard;
95
+ int prot;
96
97
size = guest_stack_size;
98
if (size < STACK_LOWER_LIMIT) {
99
@@ -XXX,XX +XXX,XX @@ static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
100
guard = qemu_real_host_page_size();
101
}
16
}
102
17
103
- error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
18
- if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
104
+ prot = PROT_READ | PROT_WRITE;
19
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
105
+ if (info->exec_stack) {
20
return true;
106
+ prot |= PROT_EXEC;
107
+ }
108
+ error = target_mmap(0, size + guard, prot,
109
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
110
if (error == -1) {
111
perror("mmap stack");
112
@@ -XXX,XX +XXX,XX @@ static void load_elf_image(const char *image_name, int image_fd,
113
*/
114
loaddr = -1, hiaddr = 0;
115
info->alignment = 0;
116
+ info->exec_stack = EXSTACK_DEFAULT;
117
for (i = 0; i < ehdr->e_phnum; ++i) {
118
struct elf_phdr *eppnt = phdr + i;
119
if (eppnt->p_type == PT_LOAD) {
120
@@ -XXX,XX +XXX,XX @@ static void load_elf_image(const char *image_name, int image_fd,
121
if (!parse_elf_properties(image_fd, info, eppnt, bprm_buf, &err)) {
122
goto exit_errmsg;
123
}
124
+ } else if (eppnt->p_type == PT_GNU_STACK) {
125
+ info->exec_stack = eppnt->p_flags & PF_X;
126
}
127
}
21
}
128
22
23
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
24
s_mask = s_mask_old >> pos;
25
s_mask |= -1ull << (len - 1);
26
27
- if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
28
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
29
return true;
30
}
31
129
--
32
--
130
2.34.1
33
2.43.0
diff view generated by jsdifflib
1
The function is not used outside of cpu-exec.c. Move it and
1
The big comment just above says functions should be sorted.
2
its subroutines up in the file, before the first use.
2
Add forward declarations as needed.
3
3
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
6
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
6
---
9
include/exec/exec-all.h | 3 -
7
tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
10
accel/tcg/cpu-exec.c | 122 ++++++++++++++++++++--------------------
8
1 file changed, 59 insertions(+), 55 deletions(-)
11
2 files changed, 61 insertions(+), 64 deletions(-)
12
9
13
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/exec-all.h
12
--- a/tcg/optimize.c
16
+++ b/include/exec/exec-all.h
13
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
18
#endif
15
* 3) those that produce information about the result value.
19
void tb_flush(CPUState *cpu);
16
*/
20
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
17
21
-TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
18
+static bool fold_or(OptContext *ctx, TCGOp *op);
22
- target_ulong cs_base, uint32_t flags,
19
+static bool fold_orc(OptContext *ctx, TCGOp *op);
23
- uint32_t cflags);
20
+static bool fold_xor(OptContext *ctx, TCGOp *op);
24
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
21
+
25
22
static bool fold_add(OptContext *ctx, TCGOp *op)
26
/* GETPC is the true target of the return instruction that we'll execute. */
23
{
27
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
24
if (fold_const2_commutative(ctx, op) ||
28
index XXXXXXX..XXXXXXX 100644
25
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
29
--- a/accel/tcg/cpu-exec.c
26
return fold_masks_zs(ctx, op, z_mask, s_mask);
30
+++ b/accel/tcg/cpu-exec.c
31
@@ -XXX,XX +XXX,XX @@ uint32_t curr_cflags(CPUState *cpu)
32
return cflags;
33
}
27
}
34
28
35
+struct tb_desc {
29
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
36
+ target_ulong pc;
30
+{
37
+ target_ulong cs_base;
31
+ /* If true and false values are the same, eliminate the cmp. */
38
+ CPUArchState *env;
32
+ if (args_are_copies(op->args[2], op->args[3])) {
39
+ tb_page_addr_t phys_page1;
33
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
40
+ uint32_t flags;
34
+ }
41
+ uint32_t cflags;
42
+ uint32_t trace_vcpu_dstate;
43
+};
44
+
35
+
45
+static bool tb_lookup_cmp(const void *p, const void *d)
36
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
46
+{
37
+ uint64_t tv = arg_info(op->args[2])->val;
47
+ const TranslationBlock *tb = p;
38
+ uint64_t fv = arg_info(op->args[3])->val;
48
+ const struct tb_desc *desc = d;
49
+
39
+
50
+ if (tb->pc == desc->pc &&
40
+ if (tv == -1 && fv == 0) {
51
+ tb->page_addr[0] == desc->phys_page1 &&
41
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
52
+ tb->cs_base == desc->cs_base &&
42
+ }
53
+ tb->flags == desc->flags &&
43
+ if (tv == 0 && fv == -1) {
54
+ tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
44
+ if (TCG_TARGET_HAS_not_vec) {
55
+ tb_cflags(tb) == desc->cflags) {
45
+ op->opc = INDEX_op_not_vec;
56
+ /* check next page if needed */
46
+ return fold_not(ctx, op);
57
+ if (tb->page_addr[1] == -1) {
47
+ } else {
58
+ return true;
48
+ op->opc = INDEX_op_xor_vec;
59
+ } else {
49
+ op->args[2] = arg_new_constant(ctx, -1);
60
+ tb_page_addr_t phys_page2;
50
+ return fold_xor(ctx, op);
61
+ target_ulong virt_page2;
62
+
63
+ virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
64
+ phys_page2 = get_page_addr_code(desc->env, virt_page2);
65
+ if (tb->page_addr[1] == phys_page2) {
66
+ return true;
67
+ }
51
+ }
68
+ }
52
+ }
69
+ }
53
+ }
70
+ return false;
54
+ if (arg_is_const(op->args[2])) {
55
+ uint64_t tv = arg_info(op->args[2])->val;
56
+ if (tv == -1) {
57
+ op->opc = INDEX_op_or_vec;
58
+ op->args[2] = op->args[3];
59
+ return fold_or(ctx, op);
60
+ }
61
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
62
+ op->opc = INDEX_op_andc_vec;
63
+ op->args[2] = op->args[1];
64
+ op->args[1] = op->args[3];
65
+ return fold_andc(ctx, op);
66
+ }
67
+ }
68
+ if (arg_is_const(op->args[3])) {
69
+ uint64_t fv = arg_info(op->args[3])->val;
70
+ if (fv == 0) {
71
+ op->opc = INDEX_op_and_vec;
72
+ return fold_and(ctx, op);
73
+ }
74
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
75
+ op->opc = INDEX_op_orc_vec;
76
+ op->args[2] = op->args[1];
77
+ op->args[1] = op->args[3];
78
+ return fold_orc(ctx, op);
79
+ }
80
+ }
81
+ return finish_folding(ctx, op);
71
+}
82
+}
72
+
83
+
73
+static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
74
+ target_ulong cs_base, uint32_t flags,
85
{
75
+ uint32_t cflags)
86
int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
76
+{
87
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
77
+ tb_page_addr_t phys_pc;
88
return fold_masks_zs(ctx, op, z_mask, s_mask);
78
+ struct tb_desc desc;
79
+ uint32_t h;
80
+
81
+ desc.env = cpu->env_ptr;
82
+ desc.cs_base = cs_base;
83
+ desc.flags = flags;
84
+ desc.cflags = cflags;
85
+ desc.trace_vcpu_dstate = *cpu->trace_dstate;
86
+ desc.pc = pc;
87
+ phys_pc = get_page_addr_code(desc.env, pc);
88
+ if (phys_pc == -1) {
89
+ return NULL;
90
+ }
91
+ desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
92
+ h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
93
+ return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
94
+}
95
+
96
/* Might cause an exception, so have a longjmp destination ready */
97
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
98
target_ulong cs_base,
99
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
100
end_exclusive();
101
}
89
}
102
90
103
-struct tb_desc {
91
-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
104
- target_ulong pc;
92
-{
105
- target_ulong cs_base;
93
- /* If true and false values are the same, eliminate the cmp. */
106
- CPUArchState *env;
94
- if (args_are_copies(op->args[2], op->args[3])) {
107
- tb_page_addr_t phys_page1;
95
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
108
- uint32_t flags;
96
- }
109
- uint32_t cflags;
110
- uint32_t trace_vcpu_dstate;
111
-};
112
-
97
-
113
-static bool tb_lookup_cmp(const void *p, const void *d)
98
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
114
-{
99
- uint64_t tv = arg_info(op->args[2])->val;
115
- const TranslationBlock *tb = p;
100
- uint64_t fv = arg_info(op->args[3])->val;
116
- const struct tb_desc *desc = d;
117
-
101
-
118
- if (tb->pc == desc->pc &&
102
- if (tv == -1 && fv == 0) {
119
- tb->page_addr[0] == desc->phys_page1 &&
103
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
120
- tb->cs_base == desc->cs_base &&
104
- }
121
- tb->flags == desc->flags &&
105
- if (tv == 0 && fv == -1) {
122
- tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
106
- if (TCG_TARGET_HAS_not_vec) {
123
- tb_cflags(tb) == desc->cflags) {
107
- op->opc = INDEX_op_not_vec;
124
- /* check next page if needed */
108
- return fold_not(ctx, op);
125
- if (tb->page_addr[1] == -1) {
109
- } else {
126
- return true;
110
- op->opc = INDEX_op_xor_vec;
127
- } else {
111
- op->args[2] = arg_new_constant(ctx, -1);
128
- tb_page_addr_t phys_page2;
112
- return fold_xor(ctx, op);
129
- target_ulong virt_page2;
130
-
131
- virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
132
- phys_page2 = get_page_addr_code(desc->env, virt_page2);
133
- if (tb->page_addr[1] == phys_page2) {
134
- return true;
135
- }
113
- }
136
- }
114
- }
137
- }
115
- }
138
- return false;
116
- if (arg_is_const(op->args[2])) {
117
- uint64_t tv = arg_info(op->args[2])->val;
118
- if (tv == -1) {
119
- op->opc = INDEX_op_or_vec;
120
- op->args[2] = op->args[3];
121
- return fold_or(ctx, op);
122
- }
123
- if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
124
- op->opc = INDEX_op_andc_vec;
125
- op->args[2] = op->args[1];
126
- op->args[1] = op->args[3];
127
- return fold_andc(ctx, op);
128
- }
129
- }
130
- if (arg_is_const(op->args[3])) {
131
- uint64_t fv = arg_info(op->args[3])->val;
132
- if (fv == 0) {
133
- op->opc = INDEX_op_and_vec;
134
- return fold_and(ctx, op);
135
- }
136
- if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
137
- op->opc = INDEX_op_orc_vec;
138
- op->args[2] = op->args[1];
139
- op->args[1] = op->args[3];
140
- return fold_orc(ctx, op);
141
- }
142
- }
143
- return finish_folding(ctx, op);
139
-}
144
-}
140
-
145
-
141
-TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
146
/* Propagate constants and copies, fold constant expressions. */
142
- target_ulong cs_base, uint32_t flags,
147
void tcg_optimize(TCGContext *s)
143
- uint32_t cflags)
144
-{
145
- tb_page_addr_t phys_pc;
146
- struct tb_desc desc;
147
- uint32_t h;
148
-
149
- desc.env = cpu->env_ptr;
150
- desc.cs_base = cs_base;
151
- desc.flags = flags;
152
- desc.cflags = cflags;
153
- desc.trace_vcpu_dstate = *cpu->trace_dstate;
154
- desc.pc = pc;
155
- phys_pc = get_page_addr_code(desc.env, pc);
156
- if (phys_pc == -1) {
157
- return NULL;
158
- }
159
- desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
160
- h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
161
- return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
162
-}
163
-
164
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
165
{
148
{
166
if (TCG_TARGET_HAS_direct_jump) {
167
--
149
--
168
2.34.1
150
2.43.0
diff view generated by jsdifflib
1
We're about to start validating PAGE_EXEC, which means that we've
1
The big comment just above says functions should be sorted.
2
got to mark page zero executable. We had been special casing this
3
entirely within translate.
4
2
5
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
5
---
9
linux-user/elfload.c | 34 +++++++++++++++++++++++++++++++---
6
tcg/optimize.c | 60 +++++++++++++++++++++++++-------------------------
10
1 file changed, 31 insertions(+), 3 deletions(-)
7
1 file changed, 30 insertions(+), 30 deletions(-)
11
8
12
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
14
--- a/linux-user/elfload.c
11
--- a/tcg/optimize.c
15
+++ b/linux-user/elfload.c
12
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static inline void init_thread(struct target_pt_regs *regs,
13
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
17
regs->gr[31] = infop->entry;
14
return true;
18
}
15
}
19
16
20
+#define LO_COMMPAGE 0
17
+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
18
+{
19
+ /* Canonicalize the comparison to put immediate second. */
20
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
21
+ op->args[3] = tcg_swap_cond(op->args[3]);
22
+ }
23
+ return finish_folding(ctx, op);
24
+}
21
+
25
+
22
+static bool init_guest_commpage(void)
26
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
23
+{
27
+{
24
+ void *want = g2h_untagged(LO_COMMPAGE);
28
+ /* If true and false values are the same, eliminate the cmp. */
25
+ void *addr = mmap(want, qemu_host_page_size, PROT_NONE,
29
+ if (args_are_copies(op->args[3], op->args[4])) {
26
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
30
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
27
+
28
+ if (addr == MAP_FAILED) {
29
+ perror("Allocating guest commpage");
30
+ exit(EXIT_FAILURE);
31
+ }
32
+ if (addr != want) {
33
+ return false;
34
+ }
31
+ }
35
+
32
+
33
+ /* Canonicalize the comparison to put immediate second. */
34
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
35
+ op->args[5] = tcg_swap_cond(op->args[5]);
36
+ }
36
+ /*
37
+ /*
37
+ * On Linux, page zero is normally marked execute only + gateway.
38
+ * Canonicalize the "false" input reg to match the destination,
38
+ * Normal read or write is supposed to fail (thus PROT_NONE above),
39
+ * so that the tcg backend can implement "move if true".
39
+ * but specific offsets have kernel code mapped to raise permissions
40
+ * and implement syscalls. Here, simply mark the page executable.
41
+ * Special case the entry points during translation (see do_page_zero).
42
+ */
40
+ */
43
+ page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
41
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
44
+ PAGE_EXEC | PAGE_VALID);
42
+ op->args[5] = tcg_invert_cond(op->args[5]);
45
+ return true;
43
+ }
44
+ return finish_folding(ctx, op);
46
+}
45
+}
47
+
46
+
48
#endif /* TARGET_HPPA */
47
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
49
48
{
50
#ifdef TARGET_XTENSA
49
uint64_t z_mask, s_mask;
51
@@ -XXX,XX +XXX,XX @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
50
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
51
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
52
}
52
}
53
53
54
#if defined(HI_COMMPAGE)
54
-static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
55
-#define LO_COMMPAGE 0
55
-{
56
+#define LO_COMMPAGE -1
56
- /* Canonicalize the comparison to put immediate second. */
57
#elif defined(LO_COMMPAGE)
57
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
58
#define HI_COMMPAGE 0
58
- op->args[3] = tcg_swap_cond(op->args[3]);
59
#else
59
- }
60
#define HI_COMMPAGE 0
60
- return finish_folding(ctx, op);
61
-#define LO_COMMPAGE 0
61
-}
62
+#define LO_COMMPAGE -1
62
-
63
#define init_guest_commpage() true
63
-static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
64
#endif
64
-{
65
65
- /* If true and false values are the same, eliminate the cmp. */
66
@@ -XXX,XX +XXX,XX @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
66
- if (args_are_copies(op->args[3], op->args[4])) {
67
} else {
67
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
68
offset = -(HI_COMMPAGE & -align);
68
- }
69
}
69
-
70
- } else if (LO_COMMPAGE != 0) {
70
- /* Canonicalize the comparison to put immediate second. */
71
+ } else if (LO_COMMPAGE != -1) {
71
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
72
loaddr = MIN(loaddr, LO_COMMPAGE & -align);
72
- op->args[5] = tcg_swap_cond(op->args[5]);
73
}
73
- }
74
74
- /*
75
- * Canonicalize the "false" input reg to match the destination,
76
- * so that the tcg backend can implement "move if true".
77
- */
78
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
79
- op->args[5] = tcg_invert_cond(op->args[5]);
80
- }
81
- return finish_folding(ctx, op);
82
-}
83
-
84
static bool fold_sextract(OptContext *ctx, TCGOp *op)
85
{
86
uint64_t z_mask, s_mask, s_mask_old;
75
--
87
--
76
2.34.1
88
2.43.0
diff view generated by jsdifflib
1
These will be useful in properly ending the TB.
1
We currently have a flag, float_muladd_halve_result, to scale
2
2
the result by 2**-1. Extend this to handle arbitrary scaling.
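Concretely, the new float*_muladd_scalbn() entry points compute (a * b + c) * 2**scale, with the scaling applied before the single rounding step, so the existing muladd calls become the scale == 0 case and float_muladd_halve_result corresponds to scale == -1. A rough standalone model with host doubles (fma/ldexp are the C library functions here, not QEMU's softfloat; scaling by a power of two is exact in the normal range, so the single rounding is preserved; link with -lm):

    #include <assert.h>
    #include <math.h>
    #include <stdio.h>

    /* Model of (a * b + c) * 2**scale with one rounding step. */
    static double muladd_scalbn(double a, double b, double c, int scale)
    {
        return ldexp(fma(a, b, c), scale);
    }

    int main(void)
    {
        double a = 1.5, b = 2.25, c = -0.75;

        /* scale == 0: plain fused multiply-add. */
        assert(muladd_scalbn(a, b, c, 0) == fma(a, b, c));
        /* scale == -1: the old float_muladd_halve_result behaviour. */
        assert(muladd_scalbn(a, b, c, -1) == fma(a, b, c) / 2.0);

        printf("%g %g\n", muladd_scalbn(a, b, c, 0), muladd_scalbn(a, b, c, -1));
        return 0;
    }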
3
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
3
4
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
target/riscv/translate.c | 10 +++++++++-
7
include/fpu/softfloat.h | 6 ++++
9
1 file changed, 9 insertions(+), 1 deletion(-)
8
fpu/softfloat.c | 58 ++++++++++++++++++++++-----------------
10
9
fpu/softfloat-parts.c.inc | 7 +++--
11
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
10
3 files changed, 44 insertions(+), 27 deletions(-)
11
12
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/target/riscv/translate.c
14
--- a/include/fpu/softfloat.h
14
+++ b/target/riscv/translate.c
15
+++ b/include/fpu/softfloat.h
15
@@ -XXX,XX +XXX,XX @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
16
@@ -XXX,XX +XXX,XX @@ float16 float16_add(float16, float16, float_status *status);
16
/* Include decoders for factored-out extensions */
17
float16 float16_sub(float16, float16, float_status *status);
17
#include "decode-XVentanaCondOps.c.inc"
18
float16 float16_mul(float16, float16, float_status *status);
18
19
float16 float16_muladd(float16, float16, float16, int, float_status *status);
19
+/* The specification allows for longer insns, but not supported by qemu. */
20
+float16 float16_muladd_scalbn(float16, float16, float16,
20
+#define MAX_INSN_LEN 4
21
+ int, int, float_status *status);
21
+
22
float16 float16_div(float16, float16, float_status *status);
22
+static inline int insn_len(uint16_t first_word)
23
float16 float16_scalbn(float16, int, float_status *status);
24
float16 float16_min(float16, float16, float_status *status);
25
@@ -XXX,XX +XXX,XX @@ float32 float32_mul(float32, float32, float_status *status);
26
float32 float32_div(float32, float32, float_status *status);
27
float32 float32_rem(float32, float32, float_status *status);
28
float32 float32_muladd(float32, float32, float32, int, float_status *status);
29
+float32 float32_muladd_scalbn(float32, float32, float32,
30
+ int, int, float_status *status);
31
float32 float32_sqrt(float32, float_status *status);
32
float32 float32_exp2(float32, float_status *status);
33
float32 float32_log2(float32, float_status *status);
34
@@ -XXX,XX +XXX,XX @@ float64 float64_mul(float64, float64, float_status *status);
35
float64 float64_div(float64, float64, float_status *status);
36
float64 float64_rem(float64, float64, float_status *status);
37
float64 float64_muladd(float64, float64, float64, int, float_status *status);
38
+float64 float64_muladd_scalbn(float64, float64, float64,
39
+ int, int, float_status *status);
40
float64 float64_sqrt(float64, float_status *status);
41
float64 float64_log2(float64, float_status *status);
42
FloatRelation float64_compare(float64, float64, float_status *status);
43
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/fpu/softfloat.c
46
+++ b/fpu/softfloat.c
47
@@ -XXX,XX +XXX,XX @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
48
#define parts_mul(A, B, S) \
49
PARTS_GENERIC_64_128(mul, A)(A, B, S)
50
51
-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
52
- FloatParts64 *c, int flags,
53
- float_status *s);
54
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
55
- FloatParts128 *c, int flags,
56
- float_status *s);
57
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
58
+ FloatParts64 *c, int scale,
59
+ int flags, float_status *s);
60
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
61
+ FloatParts128 *c, int scale,
62
+ int flags, float_status *s);
63
64
-#define parts_muladd(A, B, C, Z, S) \
65
- PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
66
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
67
+ PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)
68
69
static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
70
float_status *s);
71
@@ -XXX,XX +XXX,XX @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
72
* Fused multiply-add
73
*/
74
75
-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
76
- int flags, float_status *status)
77
+float16 QEMU_FLATTEN
78
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
79
+ int scale, int flags, float_status *status)
80
{
81
FloatParts64 pa, pb, pc, *pr;
82
83
float16_unpack_canonical(&pa, a, status);
84
float16_unpack_canonical(&pb, b, status);
85
float16_unpack_canonical(&pc, c, status);
86
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
87
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
88
89
return float16_round_pack_canonical(pr, status);
90
}
91
92
-static float32 QEMU_SOFTFLOAT_ATTR
93
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
94
- float_status *status)
95
+float16 float16_muladd(float16 a, float16 b, float16 c,
96
+ int flags, float_status *status)
23
+{
97
+{
24
+ return (first_word & 3) == 3 ? 4 : 2;
98
+ return float16_muladd_scalbn(a, b, c, 0, flags, status);
25
+}
99
+}
26
+
100
+
27
static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
101
+float32 QEMU_SOFTFLOAT_ATTR
28
{
102
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
29
/*
103
+ int scale, int flags, float_status *status)
30
@@ -XXX,XX +XXX,XX @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
104
{
31
};
105
FloatParts64 pa, pb, pc, *pr;
32
106
33
/* Check for compressed insn */
107
float32_unpack_canonical(&pa, a, status);
34
- if (extract16(opcode, 0, 2) != 3) {
108
float32_unpack_canonical(&pb, b, status);
35
+ if (insn_len(opcode) == 2) {
109
float32_unpack_canonical(&pc, c, status);
36
if (!has_ext(ctx, RVC)) {
110
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
37
gen_exception_illegal(ctx);
111
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
38
} else {
112
113
return float32_round_pack_canonical(pr, status);
114
}
115
116
-static float64 QEMU_SOFTFLOAT_ATTR
117
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
118
- float_status *status)
119
+float64 QEMU_SOFTFLOAT_ATTR
120
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
121
+ int scale, int flags, float_status *status)
122
{
123
FloatParts64 pa, pb, pc, *pr;
124
125
float64_unpack_canonical(&pa, a, status);
126
float64_unpack_canonical(&pb, b, status);
127
float64_unpack_canonical(&pc, c, status);
128
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
129
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
130
131
return float64_round_pack_canonical(pr, status);
132
}
133
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
134
return ur.s;
135
136
soft:
137
- return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
138
+ return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
139
}
140
141
float64 QEMU_FLATTEN
142
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
143
return ur.s;
144
145
soft:
146
- return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
147
+ return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
148
}
149
150
float64 float64r32_muladd(float64 a, float64 b, float64 c,
151
@@ -XXX,XX +XXX,XX @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
152
float64_unpack_canonical(&pa, a, status);
153
float64_unpack_canonical(&pb, b, status);
154
float64_unpack_canonical(&pc, c, status);
155
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
156
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
157
158
return float64r32_round_pack_canonical(pr, status);
159
}
160
@@ -XXX,XX +XXX,XX @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
161
bfloat16_unpack_canonical(&pa, a, status);
162
bfloat16_unpack_canonical(&pb, b, status);
163
bfloat16_unpack_canonical(&pc, c, status);
164
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
165
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
166
167
return bfloat16_round_pack_canonical(pr, status);
168
}
169
@@ -XXX,XX +XXX,XX @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
170
float128_unpack_canonical(&pa, a, status);
171
float128_unpack_canonical(&pb, b, status);
172
float128_unpack_canonical(&pc, c, status);
173
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
174
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
175
176
return float128_round_pack_canonical(pr, status);
177
}
178
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
179
180
float64_unpack_canonical(&rp, float64_one, status);
181
for (i = 0 ; i < 15 ; i++) {
182
+
183
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
184
- rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
185
+ rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
186
xnp = *parts_mul(&xnp, &xp, status);
187
}
188
189
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
190
index XXXXXXX..XXXXXXX 100644
191
--- a/fpu/softfloat-parts.c.inc
192
+++ b/fpu/softfloat-parts.c.inc
193
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
194
* Requires A and C extracted into a double-sized structure to provide the
195
* extra space for the widening multiply.
196
*/
197
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
198
- FloatPartsN *c, int flags, float_status *s)
199
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
200
+ FloatPartsN *c, int scale,
201
+ int flags, float_status *s)
202
{
203
int ab_mask, abc_mask;
204
FloatPartsW p_widen, c_widen;
205
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
206
a->exp = p_widen.exp;
207
208
return_normal:
209
+ /* TODO: Replace all use of float_muladd_halve_result with scale. */
210
if (flags & float_muladd_halve_result) {
211
a->exp -= 1;
212
}
213
+ a->exp += scale;
214
finish_sign:
215
if (flags & float_muladd_negate_result) {
216
a->sign ^= 1;
39
--
217
--
40
2.34.1
218
2.43.0
219
220
diff view generated by jsdifflib
1
While there are no target-specific nonfaulting probes,
1
Use the scalbn interface instead of float_muladd_halve_result.
2
generic code may grow some uses at some point.
3
2
4
Note that the attrs argument was incorrect -- it should have
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
been MEMTXATTRS_UNSPECIFIED. Just use the simpler interface.
6
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
5
---
10
target/avr/helper.c | 46 ++++++++++++++++++++++++++++-----------------
6
target/arm/tcg/helper-a64.c | 6 +++---
11
1 file changed, 29 insertions(+), 17 deletions(-)
7
1 file changed, 3 insertions(+), 3 deletions(-)
12
8
13
diff --git a/target/avr/helper.c b/target/avr/helper.c
9
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
14
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
15
--- a/target/avr/helper.c
11
--- a/target/arm/tcg/helper-a64.c
16
+++ b/target/avr/helper.c
12
+++ b/target/arm/tcg/helper-a64.c
17
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
13
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst)
18
MMUAccessType access_type, int mmu_idx,
14
(float16_is_infinity(b) && float16_is_zero(a))) {
19
bool probe, uintptr_t retaddr)
15
return float16_one_point_five;
20
{
21
- int prot = 0;
22
- MemTxAttrs attrs = {};
23
+ int prot, page_size = TARGET_PAGE_SIZE;
24
uint32_t paddr;
25
26
address &= TARGET_PAGE_MASK;
27
28
if (mmu_idx == MMU_CODE_IDX) {
29
- /* access to code in flash */
30
+ /* Access to code in flash. */
31
paddr = OFFSET_CODE + address;
32
prot = PAGE_READ | PAGE_EXEC;
33
- if (paddr + TARGET_PAGE_SIZE > OFFSET_DATA) {
34
+ if (paddr >= OFFSET_DATA) {
35
+ /*
36
+ * This should not be possible via any architectural operations.
37
+ * There is certainly not an exception that we can deliver.
38
+ * Accept probing that might come from generic code.
39
+ */
40
+ if (probe) {
41
+ return false;
42
+ }
43
error_report("execution left flash memory");
44
abort();
45
}
46
- } else if (address < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
47
- /*
48
- * access to CPU registers, exit and rebuilt this TB to use full access
49
- * incase it touches specially handled registers like SREG or SP
50
- */
51
- AVRCPU *cpu = AVR_CPU(cs);
52
- CPUAVRState *env = &cpu->env;
53
- env->fullacc = 1;
54
- cpu_loop_exit_restore(cs, retaddr);
55
} else {
56
- /* access to memory. nothing special */
57
+ /* Access to memory. */
58
paddr = OFFSET_DATA + address;
59
prot = PAGE_READ | PAGE_WRITE;
60
+ if (address < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
61
+ /*
62
+ * Access to CPU registers, exit and rebuilt this TB to use
63
+ * full access in case it touches specially handled registers
64
+ * like SREG or SP. For probing, set page_size = 1, in order
65
+ * to force tlb_fill to be called for the next access.
66
+ */
67
+ if (probe) {
68
+ page_size = 1;
69
+ } else {
70
+ AVRCPU *cpu = AVR_CPU(cs);
71
+ CPUAVRState *env = &cpu->env;
72
+ env->fullacc = 1;
73
+ cpu_loop_exit_restore(cs, retaddr);
74
+ }
75
+ }
76
}
16
}
77
17
- return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
78
- tlb_set_page_with_attrs(cs, address, paddr, attrs, prot,
18
+ return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);
79
- mmu_idx, TARGET_PAGE_SIZE);
80
-
81
+ tlb_set_page(cs, address, paddr, prot, mmu_idx, page_size);
82
return true;
83
}
19
}
84
20
21
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
22
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
23
(float32_is_infinity(b) && float32_is_zero(a))) {
24
return float32_one_point_five;
25
}
26
- return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
27
+ return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst);
28
}
29
30
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
31
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
32
(float64_is_infinity(b) && float64_is_zero(a))) {
33
return float64_one_point_five;
34
}
35
- return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
36
+ return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
37
}
38
39
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
85
--
40
--
86
2.34.1
41
2.43.0
87
42
88
43
diff view generated by jsdifflib
New patch
1
1
Use the scalbn interface instead of float_muladd_halve_result.
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/sparc/helper.h | 4 +-
7
target/sparc/fop_helper.c | 8 ++--
8
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
9
3 files changed, 54 insertions(+), 38 deletions(-)
10
11
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/sparc/helper.h
14
+++ b/target/sparc/helper.h
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
16
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
17
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
18
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
19
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
20
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
21
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
22
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
23
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
25
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
26
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
27
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
28
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
29
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
30
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
31
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
32
33
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/sparc/fop_helper.c
36
+++ b/target/sparc/fop_helper.c
37
@@ -XXX,XX +XXX,XX @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
38
}
39
40
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
41
- float32 s2, float32 s3, uint32_t op)
42
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
43
{
44
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
45
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
46
check_ieee_exceptions(env, GETPC());
47
return ret;
48
}
49
50
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
51
- float64 s2, float64 s3, uint32_t op)
52
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
53
{
54
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
55
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
56
check_ieee_exceptions(env, GETPC());
57
return ret;
58
}
59
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/sparc/translate.c
62
+++ b/target/sparc/translate.c
63
@@ -XXX,XX +XXX,XX @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
64
65
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
66
{
67
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
68
+ TCGv_i32 z = tcg_constant_i32(0);
69
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
70
}
71
72
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
73
{
74
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
75
+ TCGv_i32 z = tcg_constant_i32(0);
76
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
77
}
78
79
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
80
{
81
- int op = float_muladd_negate_c;
82
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
83
+ TCGv_i32 z = tcg_constant_i32(0);
84
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
85
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
86
}
87
88
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
89
{
90
- int op = float_muladd_negate_c;
91
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
92
+ TCGv_i32 z = tcg_constant_i32(0);
93
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
94
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
95
}
96
97
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
98
{
99
- int op = float_muladd_negate_c | float_muladd_negate_result;
100
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
101
+ TCGv_i32 z = tcg_constant_i32(0);
102
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
103
+ float_muladd_negate_result);
104
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
105
}
106
107
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
108
{
109
- int op = float_muladd_negate_c | float_muladd_negate_result;
110
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
111
+ TCGv_i32 z = tcg_constant_i32(0);
112
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
113
+ float_muladd_negate_result);
114
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
115
}
116
117
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
118
{
119
- int op = float_muladd_negate_result;
120
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
121
+ TCGv_i32 z = tcg_constant_i32(0);
122
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
123
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
124
}
125
126
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
127
{
128
- int op = float_muladd_negate_result;
129
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
130
+ TCGv_i32 z = tcg_constant_i32(0);
131
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
132
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
133
}
134
135
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
136
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
137
{
138
- TCGv_i32 one = tcg_constant_i32(float32_one);
139
- int op = float_muladd_halve_result;
140
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
141
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
142
+ TCGv_i32 mone = tcg_constant_i32(-1);
143
+ TCGv_i32 op = tcg_constant_i32(0);
144
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
145
}
146
147
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
148
{
149
- TCGv_i64 one = tcg_constant_i64(float64_one);
150
- int op = float_muladd_halve_result;
151
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
152
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
153
+ TCGv_i32 mone = tcg_constant_i32(-1);
154
+ TCGv_i32 op = tcg_constant_i32(0);
155
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
156
}
157
158
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
159
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
160
{
161
- TCGv_i32 one = tcg_constant_i32(float32_one);
162
- int op = float_muladd_negate_c | float_muladd_halve_result;
163
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
164
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
165
+ TCGv_i32 mone = tcg_constant_i32(-1);
166
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
167
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
168
}
169
170
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
171
{
172
- TCGv_i64 one = tcg_constant_i64(float64_one);
173
- int op = float_muladd_negate_c | float_muladd_halve_result;
174
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
175
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
176
+ TCGv_i32 mone = tcg_constant_i32(-1);
177
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
178
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
179
}
180
181
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
182
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
183
{
184
- TCGv_i32 one = tcg_constant_i32(float32_one);
185
- int op = float_muladd_negate_result | float_muladd_halve_result;
186
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
187
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
188
+ TCGv_i32 mone = tcg_constant_i32(-1);
189
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
190
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
191
}
192
193
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
194
{
195
- TCGv_i64 one = tcg_constant_i64(float64_one);
196
- int op = float_muladd_negate_result | float_muladd_halve_result;
197
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
198
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
199
+ TCGv_i32 mone = tcg_constant_i32(-1);
200
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
201
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
202
}
203
204
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
205
--
206
2.43.0
207
208
1
There is no need to go through cc->tcg_ops when
1
All uses have been converted to float*_muladd_scalbn.
2
we know what value that must have.
3
2
4
Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
target/avr/helper.c | 5 ++---
6
include/fpu/softfloat.h | 3 ---
9
1 file changed, 2 insertions(+), 3 deletions(-)
7
fpu/softfloat.c | 6 ------
8
fpu/softfloat-parts.c.inc | 4 ----
9
3 files changed, 13 deletions(-)
10
10
11
diff --git a/target/avr/helper.c b/target/avr/helper.c
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
12
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/avr/helper.c
13
--- a/include/fpu/softfloat.h
14
+++ b/target/avr/helper.c
14
+++ b/include/fpu/softfloat.h
15
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
16
bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
16
| Using these differs from negating an input or output before calling
17
{
17
| the muladd function in that this means that a NaN doesn't have its
18
bool ret = false;
18
| sign bit inverted before it is propagated.
19
- CPUClass *cc = CPU_GET_CLASS(cs);
19
-| We also support halving the result before rounding, as a special
20
AVRCPU *cpu = AVR_CPU(cs);
20
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
21
CPUAVRState *env = &cpu->env;
21
*----------------------------------------------------------------------------*/
22
22
enum {
23
if (interrupt_request & CPU_INTERRUPT_RESET) {
23
float_muladd_negate_c = 1,
24
if (cpu_interrupts_enabled(env)) {
24
float_muladd_negate_product = 2,
25
cs->exception_index = EXCP_RESET;
25
float_muladd_negate_result = 4,
26
- cc->tcg_ops->do_interrupt(cs);
26
- float_muladd_halve_result = 8,
27
+ avr_cpu_do_interrupt(cs);
27
};
28
28
29
cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
29
/*----------------------------------------------------------------------------
30
30
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
31
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
31
index XXXXXXX..XXXXXXX 100644
32
if (cpu_interrupts_enabled(env) && env->intsrc != 0) {
32
--- a/fpu/softfloat.c
33
int index = ctz32(env->intsrc);
33
+++ b/fpu/softfloat.c
34
cs->exception_index = EXCP_INT(index);
34
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
35
- cc->tcg_ops->do_interrupt(cs);
35
if (unlikely(!can_use_fpu(s))) {
36
+ avr_cpu_do_interrupt(cs);
36
goto soft;
37
37
}
38
env->intsrc &= env->intsrc - 1; /* clear the interrupt */
38
- if (unlikely(flags & float_muladd_halve_result)) {
39
if (!env->intsrc) {
39
- goto soft;
40
- }
41
42
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
43
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
44
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
45
if (unlikely(!can_use_fpu(s))) {
46
goto soft;
47
}
48
- if (unlikely(flags & float_muladd_halve_result)) {
49
- goto soft;
50
- }
51
52
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
53
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
54
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
55
index XXXXXXX..XXXXXXX 100644
56
--- a/fpu/softfloat-parts.c.inc
57
+++ b/fpu/softfloat-parts.c.inc
58
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
59
a->exp = p_widen.exp;
60
61
return_normal:
62
- /* TODO: Replace all use of float_muladd_halve_result with scale. */
63
- if (flags & float_muladd_halve_result) {
64
- a->exp -= 1;
65
- }
66
a->exp += scale;
67
finish_sign:
68
if (flags & float_muladd_negate_result) {
40
--
69
--
41
2.34.1
70
2.43.0
42
71
43
72
New patch
1
This rounding mode is used by Hexagon.
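For illustration (a sketch, not from the patch): with this mode an overflowing result saturates to the largest finite value instead of returning infinity. The names below follow the existing QEMU softfloat API; only the rounding mode itself is new here.

    #include "qemu/osdep.h"
    #include "fpu/softfloat.h"

    /* Multiply with saturation on overflow instead of producing infinity. */
    static float32 mul_sat_sketch(float32 a, float32 b, float_status *st)
    {
        FloatRoundMode old = get_float_rounding_mode(st);
        float32 r;

        set_float_rounding_mode(float_round_nearest_even_max, st);
        r = float32_mul(a, b, st);   /* overflow -> max finite, not inf */
        set_float_rounding_mode(old, st);
        return r;
    }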
1
2
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
include/fpu/softfloat-types.h | 2 ++
6
fpu/softfloat-parts.c.inc | 3 +++
7
2 files changed, 5 insertions(+)
8
9
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
10
index XXXXXXX..XXXXXXX 100644
11
--- a/include/fpu/softfloat-types.h
12
+++ b/include/fpu/softfloat-types.h
13
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
14
float_round_to_odd = 5,
15
/* Not an IEEE rounding mode: round to closest odd, overflow to inf */
16
float_round_to_odd_inf = 6,
17
+ /* Not an IEEE rounding mode: round to nearest even, overflow to max */
18
+ float_round_nearest_even_max = 7,
19
} FloatRoundMode;
20
21
/*
22
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
23
index XXXXXXX..XXXXXXX 100644
24
--- a/fpu/softfloat-parts.c.inc
25
+++ b/fpu/softfloat-parts.c.inc
26
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
27
int exp, flags = 0;
28
29
switch (s->float_rounding_mode) {
30
+ case float_round_nearest_even_max:
31
+ overflow_norm = true;
32
+ /* fall through */
33
case float_round_nearest_even:
34
if (N > 64 && frac_lsb == 0) {
35
inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
36
--
37
2.43.0
New patch
1
Certain Hexagon instructions suppress changes to the result
2
when the product of fma() is a true zero.
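An illustration of the difference (not part of the patch; assumes round-to-nearest): under the default IEEE rules a true-zero product added to -0 gives +0, losing the sign of the addend, while the new flag returns the addend unchanged.

    #include "qemu/osdep.h"
    #include "fpu/softfloat.h"

    static void zero_sign_sketch(float_status *st)
    {
        float32 neg_zero = make_float32(0x80000000);

        /* IEEE: (+0 * 1) + (-0) == +0 in round-to-nearest */
        float32 ieee = float32_muladd(float32_zero, float32_one, neg_zero, 0, st);

        /* With the new flag: the product is a true zero, so C (-0) is returned */
        float32 hexagon = float32_muladd(float32_zero, float32_one, neg_zero,
                                         float_muladd_suppress_add_product_zero,
                                         st);
        (void)ieee;
        (void)hexagon;
    }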
1
3
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/fpu/softfloat.h | 5 +++++
7
fpu/softfloat.c | 3 +++
8
fpu/softfloat-parts.c.inc | 4 +++-
9
3 files changed, 11 insertions(+), 1 deletion(-)
10
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/include/fpu/softfloat.h
14
+++ b/include/fpu/softfloat.h
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
16
| Using these differs from negating an input or output before calling
17
| the muladd function in that this means that a NaN doesn't have its
18
| sign bit inverted before it is propagated.
19
+|
20
+| With float_muladd_suppress_add_product_zero, if A or B is zero
21
+| such that the product is a true zero, then return C without addition.
22
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
23
*----------------------------------------------------------------------------*/
24
enum {
25
float_muladd_negate_c = 1,
26
float_muladd_negate_product = 2,
27
float_muladd_negate_result = 4,
28
+ float_muladd_suppress_add_product_zero = 8,
29
};
30
31
/*----------------------------------------------------------------------------
32
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/fpu/softfloat.c
35
+++ b/fpu/softfloat.c
36
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
37
if (unlikely(!can_use_fpu(s))) {
38
goto soft;
39
}
40
+ if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
41
+ goto soft;
42
+ }
43
44
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
45
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
46
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
47
index XXXXXXX..XXXXXXX 100644
48
--- a/fpu/softfloat-parts.c.inc
49
+++ b/fpu/softfloat-parts.c.inc
50
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
51
goto return_normal;
52
}
53
if (c->cls == float_class_zero) {
54
- if (a->sign != c->sign) {
55
+ if (flags & float_muladd_suppress_add_product_zero) {
56
+ a->sign = c->sign;
57
+ } else if (a->sign != c->sign) {
58
goto return_sub_zero;
59
}
60
goto return_zero;
61
--
62
2.43.0
1
We're about to start validating PAGE_EXEC, which means that we've
1
There are no special cases for this instruction.
2
got to mark the vsyscall page executable. We had been special
2
Remove internal_mpyf as unused.
3
casing this entirely within translate.
4
3
5
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
6
---
9
linux-user/elfload.c | 23 +++++++++++++++++++++++
7
target/hexagon/fma_emu.h | 1 -
10
1 file changed, 23 insertions(+)
8
target/hexagon/fma_emu.c | 8 --------
9
target/hexagon/op_helper.c | 2 +-
10
3 files changed, 1 insertion(+), 10 deletions(-)
11
11
12
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
12
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
13
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
14
--- a/linux-user/elfload.c
14
--- a/target/hexagon/fma_emu.h
15
+++ b/linux-user/elfload.c
15
+++ b/target/hexagon/fma_emu.h
16
@@ -XXX,XX +XXX,XX @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
16
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32);
17
(*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff);
17
float32 infinite_float32(uint8_t sign);
18
float32 internal_fmafx(float32 a, float32 b, float32 c,
19
int scale, float_status *fp_status);
20
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
21
float64 internal_mpyhh(float64 a, float64 b,
22
unsigned long long int accumulated,
23
float_status *fp_status);
24
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/hexagon/fma_emu.c
27
+++ b/target/hexagon/fma_emu.c
28
@@ -XXX,XX +XXX,XX @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
29
return accum_round_float32(result, fp_status);
18
}
30
}
19
31
20
+#if ULONG_MAX >= TARGET_VSYSCALL_PAGE
32
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
21
+#define INIT_GUEST_COMMPAGE
33
-{
22
+static bool init_guest_commpage(void)
34
- if (float32_is_zero(a) || float32_is_zero(b)) {
23
+{
35
- return float32_mul(a, b, fp_status);
24
+ /*
36
- }
25
+ * The vsyscall page is at a high negative address aka kernel space,
37
- return internal_fmafx(a, b, float32_zero, 0, fp_status);
26
+ * which means that we cannot actually allocate it with target_mmap.
38
-}
27
+ * We still should be able to use page_set_flags, unless the user
39
-
28
+ * has specified -R reserved_va, which would trigger an assert().
40
float64 internal_mpyhh(float64 a, float64 b,
29
+ */
41
unsigned long long int accumulated,
30
+ if (reserved_va != 0 &&
42
float_status *fp_status)
31
+ TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) {
43
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
32
+ error_report("Cannot allocate vsyscall page");
44
index XXXXXXX..XXXXXXX 100644
33
+ exit(EXIT_FAILURE);
45
--- a/target/hexagon/op_helper.c
34
+ }
46
+++ b/target/hexagon/op_helper.c
35
+ page_set_flags(TARGET_VSYSCALL_PAGE,
47
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
36
+ TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE,
37
+ PAGE_EXEC | PAGE_VALID);
38
+ return true;
39
+}
40
+#endif
41
#else
42
43
#define ELF_START_MMAP 0x80000000
44
@@ -XXX,XX +XXX,XX @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
45
#else
46
#define HI_COMMPAGE 0
47
#define LO_COMMPAGE -1
48
+#ifndef INIT_GUEST_COMMPAGE
49
#define init_guest_commpage() true
50
#endif
51
+#endif
52
53
static void pgb_fail_in_use(const char *image_name)
54
{
48
{
49
float32 RdV;
50
arch_fpop_start(env);
51
- RdV = internal_mpyf(RsV, RtV, &env->fp_status);
52
+ RdV = float32_mul(RsV, RtV, &env->fp_status);
53
arch_fpop_end(env);
54
return RdV;
55
}
55
--
56
--
56
2.34.1
57
2.43.0
diff view generated by jsdifflib
New patch
1
There are no special cases for this instruction.
1
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/hexagon/op_helper.c | 2 +-
7
1 file changed, 1 insertion(+), 1 deletion(-)
8
9
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/hexagon/op_helper.c
12
+++ b/target/hexagon/op_helper.c
13
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
14
float32 RsV, float32 RtV)
15
{
16
arch_fpop_start(env);
17
- RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
18
+ RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
19
arch_fpop_end(env);
20
return RxV;
21
}
22
--
23
2.43.0
New patch
1
There are no special cases for this instruction. Since Hexagon
2
always uses default-nan mode, explicitly negating the first
3
input is unnecessary. Use float_muladd_negate_product instead.
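Roughly, the change amounts to the following equivalence (a sketch, not the patch itself): the flag computes -(RsV * RtV) + RxV in one rounding, which in default-NaN mode matches negating RsV by hand before the muladd.

    #include "qemu/osdep.h"
    #include "fpu/softfloat.h"

    static float32 fms_sketch(float32 rs, float32 rt, float32 rx, float_status *st)
    {
        /* replaces: float32_muladd(float32_chs(rs), rt, rx, 0, st) */
        return float32_muladd(rs, rt, rx, float_muladd_negate_product, st);
    }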
1
4
5
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/hexagon/op_helper.c | 5 ++---
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
11
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/hexagon/op_helper.c
14
+++ b/target/hexagon/op_helper.c
15
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
16
float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
17
float32 RsV, float32 RtV)
18
{
19
- float32 neg_RsV;
20
arch_fpop_start(env);
21
- neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
22
- RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
23
+ RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
24
+ &env->fp_status);
25
arch_fpop_end(env);
26
return RxV;
27
}
28
--
29
2.43.0
New patch
1
This instruction has a special case that 0 * x + c returns c
2
without the normal sign folding that comes with 0 + -0.
3
Use the new float_muladd_suppress_add_product_zero to
4
describe this.
1
5
6
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
target/hexagon/op_helper.c | 11 +++--------
10
1 file changed, 3 insertions(+), 8 deletions(-)
11
12
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/hexagon/op_helper.c
15
+++ b/target/hexagon/op_helper.c
16
@@ -XXX,XX +XXX,XX @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
17
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
18
float32 RsV, float32 RtV, float32 PuV)
19
{
20
- size4s_t tmp;
21
arch_fpop_start(env);
22
- RxV = check_nan(RxV, RxV, &env->fp_status);
23
- RxV = check_nan(RxV, RsV, &env->fp_status);
24
- RxV = check_nan(RxV, RtV, &env->fp_status);
25
- tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
26
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
27
- RxV = tmp;
28
- }
29
+ RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
30
+ float_muladd_suppress_add_product_zero,
31
+ &env->fp_status);
32
arch_fpop_end(env);
33
return RxV;
34
}
35
--
36
2.43.0
1
Cache the translation from guest to host address, so we may
1
There are multiple special cases for this instruction.
2
use direct loads when we hit on the primary translation page.
2
(1) Saturating to the normal maximum instead of overflowing to infinity is
3
handled by the new float_round_nearest_even_max rounding mode.
4
(2) The 0 * n + c special case is handled by the new
5
float_muladd_suppress_add_product_zero flag.
6
(3) The Inf - Inf -> 0 special case can be detected after the fact
7
by examining float_flag_invalid_isi.
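A condensed sketch of how the three cases map onto the new softfloat facilities (the real code is the do_sffma_lib helper in the diff below; the negate argument is omitted here):

    static float32 sffma_lib_sketch(float32 a, float32 b, float32 c,
                                    float_status *st)
    {
        float32 r;

        set_float_rounding_mode(float_round_nearest_even_max, st);    /* (1) */
        r = float32_muladd(a, b, c,
                           float_muladd_suppress_add_product_zero,    /* (2) */
                           st);
        if (get_float_exception_flags(st) & float_flag_invalid_isi) {
            r = float32_zero;                           /* (3) Inf - Inf -> 0 */
        }
        set_float_exception_flags(0, st);  /* this insn suppresses IEEE flags */
        return r;
    }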
3
8
4
Look up the second translation page only once, during translation.
9
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
This obviates another lookup of the second page within tb_gen_code
6
after translation.
7
8
Fixes a bug in that plugin_insn_append should be passed the bytes
9
in the original memory order, not bswapped by pieces.
10
11
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
12
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
11
---
15
include/exec/translator.h | 63 +++++++++++--------
12
target/hexagon/op_helper.c | 105 +++++++++----------------------------
16
accel/tcg/translate-all.c | 23 +++----
13
1 file changed, 26 insertions(+), 79 deletions(-)
17
accel/tcg/translator.c | 126 +++++++++++++++++++++++++++++---------
18
3 files changed, 141 insertions(+), 71 deletions(-)
19
14
20
diff --git a/include/exec/translator.h b/include/exec/translator.h
15
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
21
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
22
--- a/include/exec/translator.h
17
--- a/target/hexagon/op_helper.c
23
+++ b/include/exec/translator.h
18
+++ b/target/hexagon/op_helper.c
24
@@ -XXX,XX +XXX,XX @@ typedef enum DisasJumpType {
19
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
25
* Architecture-agnostic disassembly context.
20
return RxV;
26
*/
27
typedef struct DisasContextBase {
28
- const TranslationBlock *tb;
29
+ TranslationBlock *tb;
30
target_ulong pc_first;
31
target_ulong pc_next;
32
DisasJumpType is_jmp;
33
int num_insns;
34
int max_insns;
35
bool singlestep_enabled;
36
-#ifdef CONFIG_USER_ONLY
37
- /*
38
- * Guest address of the last byte of the last protected page.
39
- *
40
- * Pages containing the translated instructions are made non-writable in
41
- * order to achieve consistency in case another thread is modifying the
42
- * code while translate_insn() fetches the instruction bytes piecemeal.
43
- * Such writer threads are blocked on mmap_lock() in page_unprotect().
44
- */
45
- target_ulong page_protect_end;
46
-#endif
47
+ void *host_addr[2];
48
} DisasContextBase;
49
50
/**
51
@@ -XXX,XX +XXX,XX @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
52
* the relevant information at translation time.
53
*/
54
55
-#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
56
- type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
57
- abi_ptr pc, bool do_swap); \
58
- static inline type fullname(CPUArchState *env, \
59
- DisasContextBase *dcbase, abi_ptr pc) \
60
- { \
61
- return fullname ## _swap(env, dcbase, pc, false); \
62
+uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
63
+uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
64
+uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
65
+uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
66
+
67
+static inline uint16_t
68
+translator_lduw_swap(CPUArchState *env, DisasContextBase *db,
69
+ abi_ptr pc, bool do_swap)
70
+{
71
+ uint16_t ret = translator_lduw(env, db, pc);
72
+ if (do_swap) {
73
+ ret = bswap16(ret);
74
}
75
+ return ret;
76
+}
77
78
-#define FOR_EACH_TRANSLATOR_LD(F) \
79
- F(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */) \
80
- F(translator_lduw, uint16_t, cpu_lduw_code, bswap16) \
81
- F(translator_ldl, uint32_t, cpu_ldl_code, bswap32) \
82
- F(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
83
+static inline uint32_t
84
+translator_ldl_swap(CPUArchState *env, DisasContextBase *db,
85
+ abi_ptr pc, bool do_swap)
86
+{
87
+ uint32_t ret = translator_ldl(env, db, pc);
88
+ if (do_swap) {
89
+ ret = bswap32(ret);
90
+ }
91
+ return ret;
92
+}
93
94
-FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
95
-
96
-#undef GEN_TRANSLATOR_LD
97
+static inline uint64_t
98
+translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
99
+ abi_ptr pc, bool do_swap)
100
+{
101
+ uint64_t ret = translator_ldq_swap(env, db, pc, false);
102
+ if (do_swap) {
103
+ ret = bswap64(ret);
104
+ }
105
+ return ret;
106
+}
107
108
/*
109
* Return whether addr is on the same page as where disassembly started.
110
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
111
index XXXXXXX..XXXXXXX 100644
112
--- a/accel/tcg/translate-all.c
113
+++ b/accel/tcg/translate-all.c
114
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
115
{
116
CPUArchState *env = cpu->env_ptr;
117
TranslationBlock *tb, *existing_tb;
118
- tb_page_addr_t phys_pc, phys_page2;
119
- target_ulong virt_page2;
120
+ tb_page_addr_t phys_pc;
121
tcg_insn_unit *gen_code_buf;
122
int gen_code_size, search_size, max_insns;
123
#ifdef CONFIG_PROFILER
124
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
125
tb->flags = flags;
126
tb->cflags = cflags;
127
tb->trace_vcpu_dstate = *cpu->trace_dstate;
128
+ tb->page_addr[0] = phys_pc;
129
+ tb->page_addr[1] = -1;
130
tcg_ctx->tb_cflags = cflags;
131
tb_overflow:
132
133
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
134
}
135
136
/*
137
- * If the TB is not associated with a physical RAM page then
138
- * it must be a temporary one-insn TB, and we have nothing to do
139
- * except fill in the page_addr[] fields. Return early before
140
- * attempting to link to other TBs or add to the lookup table.
141
+ * If the TB is not associated with a physical RAM page then it must be
142
+ * a temporary one-insn TB, and we have nothing left to do. Return early
143
+ * before attempting to link to other TBs or add to the lookup table.
144
*/
145
- if (phys_pc == -1) {
146
- tb->page_addr[0] = tb->page_addr[1] = -1;
147
+ if (tb->page_addr[0] == -1) {
148
return tb;
149
}
150
151
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
152
*/
153
tcg_tb_insert(tb);
154
155
- /* check next page if needed */
156
- virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
157
- phys_page2 = -1;
158
- if ((pc & TARGET_PAGE_MASK) != virt_page2) {
159
- phys_page2 = get_page_addr_code(env, virt_page2);
160
- }
161
/*
162
* No explicit memory barrier is required -- tb_link_page() makes the
163
* TB visible in a consistent state.
164
*/
165
- existing_tb = tb_link_page(tb, phys_pc, phys_page2);
166
+ existing_tb = tb_link_page(tb, tb->page_addr[0], tb->page_addr[1]);
167
/* if the TB already exists, discard what we just translated */
168
if (unlikely(existing_tb != tb)) {
169
uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
170
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
171
index XXXXXXX..XXXXXXX 100644
172
--- a/accel/tcg/translator.c
173
+++ b/accel/tcg/translator.c
174
@@ -XXX,XX +XXX,XX @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
175
return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
176
}
21
}
177
22
178
-static inline void translator_page_protect(DisasContextBase *dcbase,
23
-static bool is_zero_prod(float32 a, float32 b)
179
- target_ulong pc)
180
-{
24
-{
181
-#ifdef CONFIG_USER_ONLY
25
- return ((float32_is_zero(a) && is_finite(b)) ||
182
- dcbase->page_protect_end = pc | ~TARGET_PAGE_MASK;
26
- (float32_is_zero(b) && is_finite(a)));
183
- page_protect(pc);
184
-#endif
185
-}
27
-}
186
-
28
-
187
void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
29
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
188
target_ulong pc, void *host_pc,
30
-{
189
const TranslatorOps *ops, DisasContextBase *db)
31
- float32 ret = dst;
190
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
32
- if (float32_is_any_nan(x)) {
191
db->num_insns = 0;
33
- if (extract32(x, 22, 1) == 0) {
192
db->max_insns = max_insns;
34
- float_raise(float_flag_invalid, fp_status);
193
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
35
- }
194
- translator_page_protect(db, db->pc_next);
36
- ret = make_float32(0xffffffff); /* nan */
195
+ db->host_addr[0] = host_pc;
37
- }
196
+ db->host_addr[1] = NULL;
38
- return ret;
39
-}
40
-
41
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
42
float32 RsV, float32 RtV, float32 PuV)
43
{
44
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
45
return RxV;
46
}
47
48
-static bool is_inf_prod(int32_t a, int32_t b)
49
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
50
+ float32 RsV, float32 RtV, int negate)
51
{
52
- return (float32_is_infinity(a) && float32_is_infinity(b)) ||
53
- (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
54
- (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
55
+ int flags;
197
+
56
+
198
+#ifdef CONFIG_USER_ONLY
57
+ arch_fpop_start(env);
199
+ page_protect(pc);
200
+#endif
201
202
ops->init_disas_context(db, cpu);
203
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
204
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
205
#endif
206
}
207
208
-static inline void translator_maybe_page_protect(DisasContextBase *dcbase,
209
- target_ulong pc, size_t len)
210
+static void *translator_access(CPUArchState *env, DisasContextBase *db,
211
+ target_ulong pc, size_t len)
212
{
213
-#ifdef CONFIG_USER_ONLY
214
- target_ulong end = pc + len - 1;
215
+ void *host;
216
+ target_ulong base, end;
217
+ TranslationBlock *tb;
218
219
- if (end > dcbase->page_protect_end) {
220
- translator_page_protect(dcbase, end);
221
+ tb = db->tb;
222
+
58
+
223
+ /* Use slow path if first page is MMIO. */
59
+ set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
224
+ if (unlikely(tb->page_addr[0] == -1)) {
60
+ RxV = float32_muladd(RsV, RtV, RxV,
225
+ return NULL;
61
+ negate | float_muladd_suppress_add_product_zero,
226
}
62
+ &env->fp_status);
227
+
63
+
228
+ end = pc + len - 1;
64
+ flags = get_float_exception_flags(&env->fp_status);
229
+ if (likely(is_same_page(db, end))) {
65
+ if (flags) {
230
+ host = db->host_addr[0];
66
+ /* Flags are suppressed by this instruction. */
231
+ base = db->pc_first;
67
+ set_float_exception_flags(0, &env->fp_status);
232
+ } else {
233
+ host = db->host_addr[1];
234
+ base = TARGET_PAGE_ALIGN(db->pc_first);
235
+ if (host == NULL) {
236
+ tb->page_addr[1] =
237
+ get_page_addr_code_hostp(env, base, &db->host_addr[1]);
238
+#ifdef CONFIG_USER_ONLY
239
+ page_protect(end);
240
#endif
241
+ /* We cannot handle MMIO as second page. */
242
+ assert(tb->page_addr[1] != -1);
243
+ host = db->host_addr[1];
244
+ }
245
+
68
+
246
+ /* Use slow path when crossing pages. */
69
+ /* Return 0 for Inf - Inf. */
247
+ if (is_same_page(db, pc)) {
70
+ if (flags & float_flag_invalid_isi) {
248
+ return NULL;
71
+ RxV = 0;
249
+ }
72
+ }
250
+ }
73
+ }
251
+
74
+
252
+ tcg_debug_assert(pc >= base);
75
+ arch_fpop_end(env);
253
+ return host + (pc - base);
76
+ return RxV;
254
}
77
}
255
78
256
-#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
79
float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
257
- type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
80
float32 RsV, float32 RtV)
258
- abi_ptr pc, bool do_swap) \
81
{
259
- { \
82
- bool infinp;
260
- translator_maybe_page_protect(dcbase, pc, sizeof(type)); \
83
- bool infminusinf;
261
- type ret = load_fn(env, pc); \
84
- float32 tmp;
262
- if (do_swap) { \
85
-
263
- ret = swap_fn(ret); \
86
- arch_fpop_start(env);
264
- } \
87
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
265
- plugin_insn_append(pc, &ret, sizeof(ret)); \
88
- infminusinf = float32_is_infinity(RxV) &&
266
- return ret; \
89
- is_inf_prod(RsV, RtV) &&
267
+uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
90
- (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
268
+{
91
- infinp = float32_is_infinity(RxV) ||
269
+ uint8_t ret;
92
- float32_is_infinity(RtV) ||
270
+ void *p = translator_access(env, db, pc, sizeof(ret));
93
- float32_is_infinity(RsV);
271
+
94
- RxV = check_nan(RxV, RxV, &env->fp_status);
272
+ if (p) {
95
- RxV = check_nan(RxV, RsV, &env->fp_status);
273
+ plugin_insn_append(pc, p, sizeof(ret));
96
- RxV = check_nan(RxV, RtV, &env->fp_status);
274
+ return ldub_p(p);
97
- tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
275
}
98
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
276
+ ret = cpu_ldub_code(env, pc);
99
- RxV = tmp;
277
+ plugin_insn_append(pc, &ret, sizeof(ret));
100
- }
278
+ return ret;
101
- set_float_exception_flags(0, &env->fp_status);
279
+}
102
- if (float32_is_infinity(RxV) && !infinp) {
280
103
- RxV = RxV - 1;
281
-FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
104
- }
282
+uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
105
- if (infminusinf) {
283
+{
106
- RxV = 0;
284
+ uint16_t ret, plug;
107
- }
285
+ void *p = translator_access(env, db, pc, sizeof(ret));
108
- arch_fpop_end(env);
286
109
- return RxV;
287
-#undef GEN_TRANSLATOR_LD
110
+ return do_sffma_lib(env, RxV, RsV, RtV, 0);
288
+ if (p) {
111
}
289
+ plugin_insn_append(pc, p, sizeof(ret));
112
290
+ return lduw_p(p);
113
float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
291
+ }
114
float32 RsV, float32 RtV)
292
+ ret = cpu_lduw_code(env, pc);
115
{
293
+ plug = tswap16(ret);
116
- bool infinp;
294
+ plugin_insn_append(pc, &plug, sizeof(ret));
117
- bool infminusinf;
295
+ return ret;
118
- float32 tmp;
296
+}
119
-
297
+
120
- arch_fpop_start(env);
298
+uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
121
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
299
+{
122
- infminusinf = float32_is_infinity(RxV) &&
300
+ uint32_t ret, plug;
123
- is_inf_prod(RsV, RtV) &&
301
+ void *p = translator_access(env, db, pc, sizeof(ret));
124
- (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
302
+
125
- infinp = float32_is_infinity(RxV) ||
303
+ if (p) {
126
- float32_is_infinity(RtV) ||
304
+ plugin_insn_append(pc, p, sizeof(ret));
127
- float32_is_infinity(RsV);
305
+ return ldl_p(p);
128
- RxV = check_nan(RxV, RxV, &env->fp_status);
306
+ }
129
- RxV = check_nan(RxV, RsV, &env->fp_status);
307
+ ret = cpu_ldl_code(env, pc);
130
- RxV = check_nan(RxV, RtV, &env->fp_status);
308
+ plug = tswap32(ret);
131
- float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
309
+ plugin_insn_append(pc, &plug, sizeof(ret));
132
- tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
310
+ return ret;
133
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
311
+}
134
- RxV = tmp;
312
+
135
- }
313
+uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
136
- set_float_exception_flags(0, &env->fp_status);
314
+{
137
- if (float32_is_infinity(RxV) && !infinp) {
315
+ uint64_t ret, plug;
138
- RxV = RxV - 1;
316
+ void *p = translator_access(env, db, pc, sizeof(ret));
139
- }
317
+
140
- if (infminusinf) {
318
+ if (p) {
141
- RxV = 0;
319
+ plugin_insn_append(pc, p, sizeof(ret));
142
- }
320
+ return ldq_p(p);
143
- arch_fpop_end(env);
321
+ }
144
- return RxV;
322
+ ret = cpu_ldq_code(env, pc);
145
+ return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
323
+ plug = tswap64(ret);
146
}
324
+ plugin_insn_append(pc, &plug, sizeof(ret));
147
325
+ return ret;
148
float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
326
+}
327
--
149
--
328
2.34.1
150
2.43.0
1
It was non-obvious to me why we can raise an exception in
1
The function is now unused.
2
the middle of a comparison function, but it works.
3
While nearby, use TARGET_PAGE_ALIGN instead of open-coding.
4
2
5
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
accel/tcg/cpu-exec.c | 11 ++++++++++-
6
target/hexagon/fma_emu.h | 2 -
9
1 file changed, 10 insertions(+), 1 deletion(-)
7
target/hexagon/fma_emu.c | 171 ---------------------------------------
8
2 files changed, 173 deletions(-)
10
9
11
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
10
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/cpu-exec.c
12
--- a/target/hexagon/fma_emu.h
14
+++ b/accel/tcg/cpu-exec.c
13
+++ b/target/hexagon/fma_emu.h
15
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
14
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float32_getexp_raw(float32 f32)
16
tb_page_addr_t phys_page2;
15
}
17
target_ulong virt_page2;
16
int32_t float32_getexp(float32 f32);
18
17
float32 infinite_float32(uint8_t sign);
19
- virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
18
-float32 internal_fmafx(float32 a, float32 b, float32 c,
20
+ /*
19
- int scale, float_status *fp_status);
21
+ * We know that the first page matched, and an otherwise valid TB
20
float64 internal_mpyhh(float64 a, float64 b,
22
+ * encountered an incomplete instruction at the end of that page,
21
unsigned long long int accumulated,
23
+ * therefore we know that generating a new TB from the current PC
22
float_status *fp_status);
24
+ * must also require reading from the next page -- even if the
23
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
25
+ * second pages do not match, and therefore the resulting insn
24
index XXXXXXX..XXXXXXX 100644
26
+ * is different for the new TB. Therefore any exception raised
25
--- a/target/hexagon/fma_emu.c
27
+ * here by the faulting lookup is not premature.
26
+++ b/target/hexagon/fma_emu.c
28
+ */
27
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
29
+ virt_page2 = TARGET_PAGE_ALIGN(desc->pc);
28
return -1;
30
phys_page2 = get_page_addr_code(desc->env, virt_page2);
29
}
31
if (tb->page_addr[1] == phys_page2) {
30
32
return true;
31
-static uint64_t float32_getmant(float32 f32)
32
-{
33
- Float a = { .i = f32 };
34
- if (float32_is_normal(f32)) {
35
- return a.mant | 1ULL << 23;
36
- }
37
- if (float32_is_zero(f32)) {
38
- return 0;
39
- }
40
- if (float32_is_denormal(f32)) {
41
- return a.mant;
42
- }
43
- return ~0ULL;
44
-}
45
-
46
int32_t float32_getexp(float32 f32)
47
{
48
Float a = { .i = f32 };
49
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
50
}
51
52
/* Return a maximum finite value with the requested sign */
53
-static float32 maxfinite_float32(uint8_t sign)
54
-{
55
- if (sign) {
56
- return make_float32(SF_MINUS_MAXF);
57
- } else {
58
- return make_float32(SF_MAXF);
59
- }
60
-}
61
-
62
-/* Return a zero value with requested sign */
63
-static float32 zero_float32(uint8_t sign)
64
-{
65
- if (sign) {
66
- return make_float32(0x80000000);
67
- } else {
68
- return float32_zero;
69
- }
70
-}
71
-
72
#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
73
static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
74
{ \
75
@@ -XXX,XX +XXX,XX @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
76
}
77
78
GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
79
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
80
-
81
-static bool is_inf_prod(float64 a, float64 b)
82
-{
83
- return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
84
- (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
85
- (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
86
-}
87
-
88
-static float64 special_fma(float64 a, float64 b, float64 c,
89
- float_status *fp_status)
90
-{
91
- float64 ret = make_float64(0);
92
-
93
- /*
94
- * If A multiplied by B is an exact infinity and C is also an infinity
95
- * but with the opposite sign, FMA returns NaN and raises invalid.
96
- */
97
- uint8_t a_sign = float64_is_neg(a);
98
- uint8_t b_sign = float64_is_neg(b);
99
- uint8_t c_sign = float64_is_neg(c);
100
- if (is_inf_prod(a, b) && float64_is_infinity(c)) {
101
- if ((a_sign ^ b_sign) != c_sign) {
102
- ret = make_float64(DF_NAN);
103
- float_raise(float_flag_invalid, fp_status);
104
- return ret;
105
- }
106
- }
107
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
108
- (float64_is_zero(a) && float64_is_infinity(b))) {
109
- ret = make_float64(DF_NAN);
110
- float_raise(float_flag_invalid, fp_status);
111
- return ret;
112
- }
113
- /*
114
- * If none of the above checks are true and C is a NaN,
115
- * a NaN shall be returned
116
- * If A or B are NaN, a NAN shall be returned.
117
- */
118
- if (float64_is_any_nan(a) ||
119
- float64_is_any_nan(b) ||
120
- float64_is_any_nan(c)) {
121
- if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
122
- float_raise(float_flag_invalid, fp_status);
123
- }
124
- if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
125
- float_raise(float_flag_invalid, fp_status);
126
- }
127
- if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
128
- float_raise(float_flag_invalid, fp_status);
129
- }
130
- ret = make_float64(DF_NAN);
131
- return ret;
132
- }
133
- /*
134
- * We have checked for adding opposite-signed infinities.
135
- * Other infinities return infinity with the correct sign
136
- */
137
- if (float64_is_infinity(c)) {
138
- ret = infinite_float64(c_sign);
139
- return ret;
140
- }
141
- if (float64_is_infinity(a) || float64_is_infinity(b)) {
142
- ret = infinite_float64(a_sign ^ b_sign);
143
- return ret;
144
- }
145
- g_assert_not_reached();
146
-}
147
-
148
-static float32 special_fmaf(float32 a, float32 b, float32 c,
149
- float_status *fp_status)
150
-{
151
- float64 aa, bb, cc;
152
- aa = float32_to_float64(a, fp_status);
153
- bb = float32_to_float64(b, fp_status);
154
- cc = float32_to_float64(c, fp_status);
155
- return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
156
-}
157
-
158
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
159
- float_status *fp_status)
160
-{
161
- Accum prod;
162
- Accum acc;
163
- Accum result;
164
- accum_init(&prod);
165
- accum_init(&acc);
166
- accum_init(&result);
167
-
168
- uint8_t a_sign = float32_is_neg(a);
169
- uint8_t b_sign = float32_is_neg(b);
170
- uint8_t c_sign = float32_is_neg(c);
171
- if (float32_is_infinity(a) ||
172
- float32_is_infinity(b) ||
173
- float32_is_infinity(c)) {
174
- return special_fmaf(a, b, c, fp_status);
175
- }
176
- if (float32_is_any_nan(a) ||
177
- float32_is_any_nan(b) ||
178
- float32_is_any_nan(c)) {
179
- return special_fmaf(a, b, c, fp_status);
180
- }
181
- if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
182
- float32 tmp = float32_mul(a, b, fp_status);
183
- tmp = float32_add(tmp, c, fp_status);
184
- return tmp;
185
- }
186
-
187
- /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
188
- prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
189
-
190
- /*
191
- * Note: extracting the mantissa into an int is multiplying by
192
- * 2**23, so adjust here
193
- */
194
- prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
195
- prod.sign = a_sign ^ b_sign;
196
- if (float32_is_zero(a) || float32_is_zero(b)) {
197
- prod.exp = -2 * WAY_BIG_EXP;
198
- }
199
- if ((scale > 0) && float32_is_denormal(c)) {
200
- acc.mant = int128_mul_6464(0, 0);
201
- acc.exp = -WAY_BIG_EXP;
202
- acc.sign = c_sign;
203
- acc.sticky = 1;
204
- result = accum_add(prod, acc);
205
- } else if (!float32_is_zero(c)) {
206
- acc.mant = int128_mul_6464(float32_getmant(c), 1);
207
- acc.exp = float32_getexp(c);
208
- acc.sign = c_sign;
209
- result = accum_add(prod, acc);
210
- } else {
211
- result = prod;
212
- }
213
- result.exp += scale;
214
- return accum_round_float32(result, fp_status);
215
-}
216
217
float64 internal_mpyhh(float64 a, float64 b,
218
unsigned long long int accumulated,
33
--
219
--
34
2.34.1
220
2.43.0
1
The only user can easily use translator_lduw and
1
This massive macro is now only used once.
2
adjust the type to signed during the return.
2
Expand it for use only by float64.
3
3
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
6
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
6
---
9
include/exec/translator.h | 1 -
7
target/hexagon/fma_emu.c | 255 +++++++++++++++++++--------------------
10
target/i386/tcg/translate.c | 2 +-
8
1 file changed, 127 insertions(+), 128 deletions(-)
11
2 files changed, 1 insertion(+), 2 deletions(-)
12
9
13
diff --git a/include/exec/translator.h b/include/exec/translator.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/translator.h
12
--- a/target/hexagon/fma_emu.c
16
+++ b/include/exec/translator.h
13
+++ b/target/hexagon/fma_emu.c
17
@@ -XXX,XX +XXX,XX @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
14
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
18
19
#define FOR_EACH_TRANSLATOR_LD(F) \
20
F(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */) \
21
- F(translator_ldsw, int16_t, cpu_ldsw_code, bswap16) \
22
F(translator_lduw, uint16_t, cpu_lduw_code, bswap16) \
23
F(translator_ldl, uint32_t, cpu_ldl_code, bswap32) \
24
F(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
25
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/i386/tcg/translate.c
28
+++ b/target/i386/tcg/translate.c
29
@@ -XXX,XX +XXX,XX @@ static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
30
31
static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
32
{
33
- return translator_ldsw(env, &s->base, advance_pc(env, s, 2));
34
+ return translator_lduw(env, &s->base, advance_pc(env, s, 2));
35
}
15
}
36
16
37
static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
17
/* Return a maximum finite value with the requested sign */
18
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
19
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
20
-{ \
21
- if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
22
- && ((a.guard | a.round | a.sticky) == 0)) { \
23
- /* result zero */ \
24
- switch (fp_status->float_rounding_mode) { \
25
- case float_round_down: \
26
- return zero_##SUFFIX(1); \
27
- default: \
28
- return zero_##SUFFIX(0); \
29
- } \
30
- } \
31
- /* Normalize right */ \
32
- /* We want MANTBITS bits of mantissa plus the leading one. */ \
33
- /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
34
- /* So we need to normalize right while the high word is non-zero and \
35
- * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
36
- while ((int128_gethi(a.mant) != 0) || \
37
- ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
38
- a = accum_norm_right(a, 1); \
39
- } \
40
- /* \
41
- * OK, now normalize left \
42
- * We want to normalize left until we have a leading one in bit 24 \
43
- * Theoretically, we only need to shift a maximum of one to the left if we \
44
- * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
45
- * should be 0 \
46
- */ \
47
- while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
48
- a = accum_norm_left(a); \
49
- } \
50
- /* \
51
- * OK, now we might need to denormalize because of potential underflow. \
52
- * We need to do this before rounding, and rounding might make us normal \
53
- * again \
54
- */ \
55
- while (a.exp <= 0) { \
56
- a = accum_norm_right(a, 1 - a.exp); \
57
- /* \
58
- * Do we have underflow? \
59
- * That's when we get an inexact answer because we ran out of bits \
60
- * in a denormal. \
61
- */ \
62
- if (a.guard || a.round || a.sticky) { \
63
- float_raise(float_flag_underflow, fp_status); \
64
- } \
65
- } \
66
- /* OK, we're relatively canonical... now we need to round */ \
67
- if (a.guard || a.round || a.sticky) { \
68
- float_raise(float_flag_inexact, fp_status); \
69
- switch (fp_status->float_rounding_mode) { \
70
- case float_round_to_zero: \
71
- /* Chop and we're done */ \
72
- break; \
73
- case float_round_up: \
74
- if (a.sign == 0) { \
75
- a.mant = int128_add(a.mant, int128_one()); \
76
- } \
77
- break; \
78
- case float_round_down: \
79
- if (a.sign != 0) { \
80
- a.mant = int128_add(a.mant, int128_one()); \
81
- } \
82
- break; \
83
- default: \
84
- if (a.round || a.sticky) { \
85
- /* round up if guard is 1, down if guard is zero */ \
86
- a.mant = int128_add(a.mant, int128_make64(a.guard)); \
87
- } else if (a.guard) { \
88
- /* exactly .5, round up if odd */ \
89
- a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
90
- } \
91
- break; \
92
- } \
93
- } \
94
- /* \
95
- * OK, now we might have carried all the way up. \
96
- * So we might need to shr once \
97
- * at least we know that the lsb should be zero if we rounded and \
98
- * got a carry out... \
99
- */ \
100
- if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
101
- a = accum_norm_right(a, 1); \
102
- } \
103
- /* Overflow? */ \
104
- if (a.exp >= INF_EXP) { \
105
- /* Yep, inf result */ \
106
- float_raise(float_flag_overflow, fp_status); \
107
- float_raise(float_flag_inexact, fp_status); \
108
- switch (fp_status->float_rounding_mode) { \
109
- case float_round_to_zero: \
110
- return maxfinite_##SUFFIX(a.sign); \
111
- case float_round_up: \
112
- if (a.sign == 0) { \
113
- return infinite_##SUFFIX(a.sign); \
114
- } else { \
115
- return maxfinite_##SUFFIX(a.sign); \
116
- } \
117
- case float_round_down: \
118
- if (a.sign != 0) { \
119
- return infinite_##SUFFIX(a.sign); \
120
- } else { \
121
- return maxfinite_##SUFFIX(a.sign); \
122
- } \
123
- default: \
124
- return infinite_##SUFFIX(a.sign); \
125
- } \
126
- } \
127
- /* Underflow? */ \
128
- if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
129
- /* Leading one means: No, we're normal. So, we should be done... */ \
130
- INTERNAL_TYPE ret; \
131
- ret.i = 0; \
132
- ret.sign = a.sign; \
133
- ret.exp = a.exp; \
134
- ret.mant = int128_getlo(a.mant); \
135
- return ret.i; \
136
- } \
137
- assert(a.exp == 1); \
138
- INTERNAL_TYPE ret; \
139
- ret.i = 0; \
140
- ret.sign = a.sign; \
141
- ret.exp = 0; \
142
- ret.mant = int128_getlo(a.mant); \
143
- return ret.i; \
144
+static float64 accum_round_float64(Accum a, float_status *fp_status)
145
+{
146
+ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
147
+ && ((a.guard | a.round | a.sticky) == 0)) {
148
+ /* result zero */
149
+ switch (fp_status->float_rounding_mode) {
150
+ case float_round_down:
151
+ return zero_float64(1);
152
+ default:
153
+ return zero_float64(0);
154
+ }
155
+ }
156
+ /*
157
+ * Normalize right
158
+ * We want DF_MANTBITS bits of mantissa plus the leading one.
159
+ * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
160
+ * So we need to normalize right while the high word is non-zero and
161
+ * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
162
+ */
163
+ while ((int128_gethi(a.mant) != 0) ||
164
+ ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
165
+ a = accum_norm_right(a, 1);
166
+ }
167
+ /*
168
+ * OK, now normalize left
169
+ * We want to normalize left until we have a leading one in bit 24
170
+ * Theoretically, we only need to shift a maximum of one to the left if we
171
+ * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
172
+ * should be 0
173
+ */
174
+ while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
175
+ a = accum_norm_left(a);
176
+ }
177
+ /*
178
+ * OK, now we might need to denormalize because of potential underflow.
179
+ * We need to do this before rounding, and rounding might make us normal
180
+ * again
181
+ */
182
+ while (a.exp <= 0) {
183
+ a = accum_norm_right(a, 1 - a.exp);
184
+ /*
185
+ * Do we have underflow?
186
+ * That's when we get an inexact answer because we ran out of bits
187
+ * in a denormal.
188
+ */
189
+ if (a.guard || a.round || a.sticky) {
190
+ float_raise(float_flag_underflow, fp_status);
191
+ }
192
+ }
193
+ /* OK, we're relatively canonical... now we need to round */
194
+ if (a.guard || a.round || a.sticky) {
195
+ float_raise(float_flag_inexact, fp_status);
196
+ switch (fp_status->float_rounding_mode) {
197
+ case float_round_to_zero:
198
+ /* Chop and we're done */
199
+ break;
200
+ case float_round_up:
201
+ if (a.sign == 0) {
202
+ a.mant = int128_add(a.mant, int128_one());
203
+ }
204
+ break;
205
+ case float_round_down:
206
+ if (a.sign != 0) {
207
+ a.mant = int128_add(a.mant, int128_one());
208
+ }
209
+ break;
210
+ default:
211
+ if (a.round || a.sticky) {
212
+ /* round up if guard is 1, down if guard is zero */
213
+ a.mant = int128_add(a.mant, int128_make64(a.guard));
214
+ } else if (a.guard) {
215
+ /* exactly .5, round up if odd */
216
+ a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
217
+ }
218
+ break;
219
+ }
220
+ }
221
+ /*
222
+ * OK, now we might have carried all the way up.
223
+ * So we might need to shr once
224
+ * at least we know that the lsb should be zero if we rounded and
225
+ * got a carry out...
226
+ */
227
+ if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
228
+ a = accum_norm_right(a, 1);
229
+ }
230
+ /* Overflow? */
231
+ if (a.exp >= DF_INF_EXP) {
232
+ /* Yep, inf result */
233
+ float_raise(float_flag_overflow, fp_status);
234
+ float_raise(float_flag_inexact, fp_status);
235
+ switch (fp_status->float_rounding_mode) {
236
+ case float_round_to_zero:
237
+ return maxfinite_float64(a.sign);
238
+ case float_round_up:
239
+ if (a.sign == 0) {
240
+ return infinite_float64(a.sign);
241
+ } else {
242
+ return maxfinite_float64(a.sign);
243
+ }
244
+ case float_round_down:
245
+ if (a.sign != 0) {
246
+ return infinite_float64(a.sign);
247
+ } else {
248
+ return maxfinite_float64(a.sign);
249
+ }
250
+ default:
251
+ return infinite_float64(a.sign);
252
+ }
253
+ }
254
+ /* Underflow? */
255
+ if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
256
+ /* Leading one means: No, we're normal. So, we should be done... */
257
+ Double ret;
258
+ ret.i = 0;
259
+ ret.sign = a.sign;
260
+ ret.exp = a.exp;
261
+ ret.mant = int128_getlo(a.mant);
262
+ return ret.i;
263
+ }
264
+ assert(a.exp == 1);
265
+ Double ret;
266
+ ret.i = 0;
267
+ ret.sign = a.sign;
268
+ ret.exp = 0;
269
+ ret.mant = int128_getlo(a.mant);
270
+ return ret.i;
271
}
272
273
-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
274
-
275
float64 internal_mpyhh(float64 a, float64 b,
276
unsigned long long int accumulated,
277
float_status *fp_status)
38
--
278
--
39
2.34.1
279
2.43.0
1
The mmap_lock is held around tb_gen_code. While the comment
1
This structure, with bitfields, is incorrect for big-endian.
2
is correct that the lock is dropped when tb_gen_code runs out
2
Use the existing float32_getexp_raw which uses extract32.
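For reference, a sketch of the endian-safe field accessors this relies on (illustrative only; extract32 comes from qemu/bitops.h and float32 is a plain uint32_t typedef in QEMU's softfloat):

    #include "qemu/osdep.h"
    #include "qemu/bitops.h"
    #include "fpu/softfloat.h"

    /* IEEE binary32 layout: sign[31] exp[30:23] mantissa[22:0] */
    static inline uint32_t sf_mant_sketch(float32 f) { return extract32(f, 0, 23); }
    static inline int      sf_exp_sketch(float32 f)  { return extract32(f, 23, 8); }
    static inline bool     sf_sign_sketch(float32 f) { return extract32(f, 31, 1); }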
3
of memory, the lock is *not* dropped when an exception is
4
raised reading code for translation.
5
3
6
Acked-by: Alistair Francis <alistair.francis@wdc.com>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
7
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
8
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
6
---
11
accel/tcg/cpu-exec.c | 12 ++++++------
7
target/hexagon/fma_emu.c | 16 +++-------------
12
accel/tcg/user-exec.c | 3 ---
8
1 file changed, 3 insertions(+), 13 deletions(-)
13
2 files changed, 6 insertions(+), 9 deletions(-)
14
9
15
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
16
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/tcg/cpu-exec.c
12
--- a/target/hexagon/fma_emu.c
18
+++ b/accel/tcg/cpu-exec.c
13
+++ b/target/hexagon/fma_emu.c
19
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
14
@@ -XXX,XX +XXX,XX @@ typedef union {
20
cpu_tb_exec(cpu, tb, &tb_exit);
15
};
21
cpu_exec_exit(cpu);
16
} Double;
22
} else {
17
23
- /*
18
-typedef union {
24
- * The mmap_lock is dropped by tb_gen_code if it runs out of
19
- float f;
25
- * memory.
20
- uint32_t i;
26
- */
21
- struct {
27
#ifndef CONFIG_SOFTMMU
22
- uint32_t mant:23;
28
clear_helper_retaddr();
23
- uint32_t exp:8;
29
- tcg_debug_assert(!have_mmap_lock());
24
- uint32_t sign:1;
30
+ if (have_mmap_lock()) {
25
- };
31
+ mmap_unlock();
26
-} Float;
32
+ }
27
-
33
#endif
28
static uint64_t float64_getmant(float64 f64)
34
if (qemu_mutex_iothread_locked()) {
29
{
35
qemu_mutex_unlock_iothread();
30
Double a = { .i = f64 };
36
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
31
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
37
32
38
#ifndef CONFIG_SOFTMMU
33
int32_t float32_getexp(float32 f32)
39
clear_helper_retaddr();
34
{
40
- tcg_debug_assert(!have_mmap_lock());
35
- Float a = { .i = f32 };
41
+ if (have_mmap_lock()) {
36
+ int exp = float32_getexp_raw(f32);
42
+ mmap_unlock();
37
if (float32_is_normal(f32)) {
43
+ }
38
- return a.exp;
44
#endif
39
+ return exp;
45
if (qemu_mutex_iothread_locked()) {
46
qemu_mutex_unlock_iothread();
47
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
48
index XXXXXXX..XXXXXXX 100644
49
--- a/accel/tcg/user-exec.c
50
+++ b/accel/tcg/user-exec.c
51
@@ -XXX,XX +XXX,XX @@ MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
52
* (and if the translator doesn't handle page boundaries correctly
53
* there's little we can do about that here). Therefore, do not
54
* trigger the unwinder.
55
- *
56
- * Like tb_gen_code, release the memory lock before cpu_loop_exit.
57
*/
58
- mmap_unlock();
59
*pc = 0;
60
return MMU_INST_FETCH;
61
}
40
}
41
if (float32_is_denormal(f32)) {
42
- return a.exp + 1;
43
+ return exp + 1;
44
}
45
return -1;
46
}
62
--
47
--
63
2.34.1
48
2.43.0
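The endianness problem fixed above comes from relying on bitfield layout, which the C standard leaves to the implementation; an explicit shift-and-mask is layout independent. A small standalone sketch in the spirit of extract32() (illustrative only, not the QEMU implementation; assumes 0 < length <= 32):

#include <stdint.h>
#include <assert.h>

static uint32_t extract_bits32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

int main(void)
{
    uint32_t one = 0x3f800000;                  /* IEEE binary32 encoding of 1.0f */
    assert(extract_bits32(one, 23, 8) == 127);  /* biased exponent */
    assert(extract_bits32(one, 0, 23) == 0);    /* fraction        */
    assert(extract_bits32(one, 31, 1) == 0);    /* sign            */
    return 0;
}

Because the shift counts are spelled out, the same code extracts the same field on big- and little-endian hosts, which is exactly why the patch switches float32_getexp() over to float32_getexp_raw().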
1
We cannot deliver two interrupts simultaneously;
1
This structure, with bitfields, is incorrect for big-endian.
2
the first interrupt handler must execute first.
2
Use extract64 and deposit64 instead.
3
3
4
Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
target/avr/helper.c | 9 +++------
7
target/hexagon/fma_emu.c | 46 ++++++++++++++--------------------------
9
1 file changed, 3 insertions(+), 6 deletions(-)
8
1 file changed, 16 insertions(+), 30 deletions(-)
10
9
11
diff --git a/target/avr/helper.c b/target/avr/helper.c
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/target/avr/helper.c
12
--- a/target/hexagon/fma_emu.c
14
+++ b/target/avr/helper.c
13
+++ b/target/hexagon/fma_emu.c
15
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@
16
15
17
bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
16
#define WAY_BIG_EXP 4096
17
18
-typedef union {
19
- double f;
20
- uint64_t i;
21
- struct {
22
- uint64_t mant:52;
23
- uint64_t exp:11;
24
- uint64_t sign:1;
25
- };
26
-} Double;
27
-
28
static uint64_t float64_getmant(float64 f64)
18
{
29
{
19
- bool ret = false;
30
- Double a = { .i = f64 };
20
AVRCPU *cpu = AVR_CPU(cs);
31
+ uint64_t mant = extract64(f64, 0, 52);
21
CPUAVRState *env = &cpu->env;
32
if (float64_is_normal(f64)) {
22
33
- return a.mant | 1ULL << 52;
23
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
34
+ return mant | 1ULL << 52;
24
avr_cpu_do_interrupt(cs);
35
}
25
36
if (float64_is_zero(f64)) {
26
cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
37
return 0;
27
-
38
}
28
- ret = true;
39
if (float64_is_denormal(f64)) {
29
+ return true;
40
- return a.mant;
41
+ return mant;
42
}
43
return ~0ULL;
44
}
45
46
int32_t float64_getexp(float64 f64)
47
{
48
- Double a = { .i = f64 };
49
+ int exp = extract64(f64, 52, 11);
50
if (float64_is_normal(f64)) {
51
- return a.exp;
52
+ return exp;
53
}
54
if (float64_is_denormal(f64)) {
55
- return a.exp + 1;
56
+ return exp + 1;
57
}
58
return -1;
59
}
60
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
61
/* Return a maximum finite value with the requested sign */
62
static float64 accum_round_float64(Accum a, float_status *fp_status)
63
{
64
+ uint64_t ret;
65
+
66
if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
67
&& ((a.guard | a.round | a.sticky) == 0)) {
68
/* result zero */
69
@@ -XXX,XX +XXX,XX @@ static float64 accum_round_float64(Accum a, float_status *fp_status)
30
}
70
}
31
}
71
}
32
if (interrupt_request & CPU_INTERRUPT_HARD) {
72
/* Underflow? */
33
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
73
- if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
34
if (!env->intsrc) {
74
+ ret = int128_getlo(a.mant);
35
cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
75
+ if (ret & (1ULL << DF_MANTBITS)) {
36
}
76
/* Leading one means: No, we're normal. So, we should be done... */
37
-
77
- Double ret;
38
- ret = true;
78
- ret.i = 0;
39
+ return true;
79
- ret.sign = a.sign;
40
}
80
- ret.exp = a.exp;
81
- ret.mant = int128_getlo(a.mant);
82
- return ret.i;
83
+ ret = deposit64(ret, 52, 11, a.exp);
84
+ } else {
85
+ assert(a.exp == 1);
86
+ ret = deposit64(ret, 52, 11, 0);
41
}
87
}
42
- return ret;
88
- assert(a.exp == 1);
43
+ return false;
89
- Double ret;
90
- ret.i = 0;
91
- ret.sign = a.sign;
92
- ret.exp = 0;
93
- ret.mant = int128_getlo(a.mant);
94
- return ret.i;
95
+ ret = deposit64(ret, 63, 1, a.sign);
96
+ return ret;
44
}
97
}
45
98
46
void avr_cpu_do_interrupt(CPUState *cs)
99
float64 internal_mpyhh(float64 a, float64 b,
47
--
100
--
48
2.34.1
101
2.43.0
49
50
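The companion change above handles the reverse direction with explicit deposits, again avoiding any dependence on struct layout. A standalone sketch in the spirit of deposit64() (illustrative, not the QEMU helper; assumes start + length <= 64), assembling the IEEE binary64 encoding of 1.0 from its fields:

#include <stdint.h>
#include <assert.h>

/* Insert 'field' into bits [start, start + length) of 'value'. */
static uint64_t deposit_bits64(uint64_t value, int start, int length, uint64_t field)
{
    uint64_t mask = (~(uint64_t)0 >> (64 - length)) << start;
    return (value & ~mask) | ((field << start) & mask);
}

int main(void)
{
    uint64_t bits = 0;
    bits = deposit_bits64(bits, 0, 52, 0);      /* zero mantissa   */
    bits = deposit_bits64(bits, 52, 11, 1023);  /* biased exponent */
    bits = deposit_bits64(bits, 63, 1, 0);      /* positive sign   */
    assert(bits == 0x3ff0000000000000ULL);      /* encoding of 1.0 */
    return 0;
}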
1
Simplify the implementation of get_page_addr_code_hostp
1
No need to open-code 64x64->128-bit multiplication.
2
by reusing the existing probe_access infrastructure.
3
2
4
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
accel/tcg/cputlb.c | 76 ++++++++++++++++------------------------------
6
target/hexagon/fma_emu.c | 32 +++-----------------------------
9
1 file changed, 26 insertions(+), 50 deletions(-)
7
1 file changed, 3 insertions(+), 29 deletions(-)
10
8
11
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
9
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
12
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/cputlb.c
11
--- a/target/hexagon/fma_emu.c
14
+++ b/accel/tcg/cputlb.c
12
+++ b/target/hexagon/fma_emu.c
15
@@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
13
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32)
16
victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
14
return -1;
17
(ADDR) & TARGET_PAGE_MASK)
15
}
18
16
19
-/*
17
-static uint32_t int128_getw0(Int128 x)
20
- * Return a ram_addr_t for the virtual address for execution.
21
- *
22
- * Return -1 if we can't translate and execute from an entire page
23
- * of RAM. This will force us to execute by loading and translating
24
- * one insn at a time, without caching.
25
- *
26
- * NOTE: This function will trigger an exception if the page is
27
- * not executable.
28
- */
29
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
30
- void **hostp)
31
-{
18
-{
32
- uintptr_t mmu_idx = cpu_mmu_index(env, true);
19
- return int128_getlo(x);
33
- uintptr_t index = tlb_index(env, mmu_idx, addr);
20
-}
34
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
35
- void *p;
36
-
21
-
37
- if (unlikely(!tlb_hit(entry->addr_code, addr))) {
22
-static uint32_t int128_getw1(Int128 x)
38
- if (!VICTIM_TLB_HIT(addr_code, addr)) {
23
-{
39
- tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
24
- return int128_getlo(x) >> 32;
40
- index = tlb_index(env, mmu_idx, addr);
25
-}
41
- entry = tlb_entry(env, mmu_idx, addr);
42
-
26
-
43
- if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
27
static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
44
- /*
28
{
45
- * The MMU protection covers a smaller range than a target
29
- Int128 a, b;
46
- * page, so we must redo the MMU check for every insn.
30
- uint64_t pp0, pp1a, pp1b, pp1s, pp2;
47
- */
31
+ uint64_t l, h;
48
- return -1;
32
49
- }
33
- a = int128_make64(ai);
50
- }
34
- b = int128_make64(bi);
51
- assert(tlb_hit(entry->addr_code, addr));
35
- pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
36
- pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
37
- pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
38
- pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
39
-
40
- pp1s = pp1a + pp1b;
41
- if ((pp1s < pp1a) || (pp1s < pp1b)) {
42
- pp2 += (1ULL << 32);
43
- }
44
- uint64_t ret_low = pp0 + (pp1s << 32);
45
- if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
46
- pp2 += 1;
52
- }
47
- }
53
-
48
-
54
- if (unlikely(entry->addr_code & TLB_MMIO)) {
49
- return int128_make128(ret_low, pp2 + (pp1s >> 32));
55
- /* The region is not backed by RAM. */
50
+ mulu64(&l, &h, ai, bi);
56
- if (hostp) {
51
+ return int128_make128(l, h);
57
- *hostp = NULL;
58
- }
59
- return -1;
60
- }
61
-
62
- p = (void *)((uintptr_t)addr + entry->addend);
63
- if (hostp) {
64
- *hostp = p;
65
- }
66
- return qemu_ram_addr_from_host_nofail(p);
67
-}
68
-
69
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
70
CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
71
{
72
@@ -XXX,XX +XXX,XX @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
73
return flags ? NULL : host;
74
}
52
}
75
53
76
+/*
54
static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
77
+ * Return a ram_addr_t for the virtual address for execution.
78
+ *
79
+ * Return -1 if we can't translate and execute from an entire page
80
+ * of RAM. This will force us to execute by loading and translating
81
+ * one insn at a time, without caching.
82
+ *
83
+ * NOTE: This function will trigger an exception if the page is
84
+ * not executable.
85
+ */
86
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
87
+ void **hostp)
88
+{
89
+ void *p;
90
+
91
+ (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
92
+ cpu_mmu_index(env, true), false, &p, 0);
93
+ if (p == NULL) {
94
+ return -1;
95
+ }
96
+ if (hostp) {
97
+ *hostp = p;
98
+ }
99
+ return qemu_ram_addr_from_host_nofail(p);
100
+}
101
+
102
#ifdef CONFIG_PLUGIN
103
/*
104
* Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
105
--
55
--
106
2.34.1
56
2.43.0
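The mulu64() call above yields the full 128-bit product of two 64-bit operands in one step. Where the compiler offers a 128-bit integer type (a GCC/Clang extension on 64-bit hosts), the same operation can be sketched as follows (illustrative only, not QEMU's mulu64() implementation):

#include <stdint.h>
#include <assert.h>

/* 64x64 -> 128-bit unsigned multiply, split into low and high halves. */
static void mul_u64_full(uint64_t *lo, uint64_t *hi, uint64_t a, uint64_t b)
{
    unsigned __int128 p = (unsigned __int128)a * b;
    *lo = (uint64_t)p;
    *hi = (uint64_t)(p >> 64);
}

int main(void)
{
    uint64_t lo, hi;
    mul_u64_full(&lo, &hi, 0xffffffffffffffffULL, 2);   /* (2^64 - 1) * 2 */
    assert(lo == 0xfffffffffffffffeULL && hi == 1);
    return 0;
}

This is why the open-coded 32-bit partial products in int128_mul_6464() can simply be dropped.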
1
We're about to start validating PAGE_EXEC, which means
1
Initialize x with accumulated via direct assignment,
2
that we've got to put this code into a section that is
2
rather than multiplying by 1.
3
both writable and executable.
4
3
5
Note that this test did not run on hardware beforehand either.
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
7
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
8
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
6
---
11
tests/tcg/i386/test-i386.c | 2 +-
7
target/hexagon/fma_emu.c | 2 +-
12
1 file changed, 1 insertion(+), 1 deletion(-)
8
1 file changed, 1 insertion(+), 1 deletion(-)
13
9
14
diff --git a/tests/tcg/i386/test-i386.c b/tests/tcg/i386/test-i386.c
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
15
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
16
--- a/tests/tcg/i386/test-i386.c
12
--- a/target/hexagon/fma_emu.c
17
+++ b/tests/tcg/i386/test-i386.c
13
+++ b/target/hexagon/fma_emu.c
18
@@ -XXX,XX +XXX,XX @@ uint8_t code[] = {
14
@@ -XXX,XX +XXX,XX @@ float64 internal_mpyhh(float64 a, float64 b,
19
0xc3, /* ret */
15
float64_is_infinity(b)) {
20
};
16
return float64_mul(a, b, fp_status);
21
17
}
22
-asm(".section \".data\"\n"
18
- x.mant = int128_mul_6464(accumulated, 1);
23
+asm(".section \".data_x\",\"awx\"\n"
19
+ x.mant = int128_make64(accumulated);
24
"smc_code2:\n"
20
x.sticky = sticky;
25
"movl 4(%esp), %eax\n"
21
prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
26
"movl %eax, smc_patch_addr2 + 1\n"
22
x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
27
--
23
--
28
2.34.1
24
2.43.0
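On the section flags used above: in GAS syntax the quoted flag string marks the section allocatable ("a"), writable ("w") and executable ("x"), which is what lets the test both patch the code at run time and jump to it. A minimal sketch of the same directive outside the test (illustrative only; the label and section name are invented):

/* Place one byte of patchable code in an allocatable, writable,
 * executable section, then restore the previous section. */
asm(".section \".data_patch\",\"awx\"\n"
    "stub_ret: .byte 0xc3\n"        /* x86 'ret' */
    ".previous");

Without the explicit flags, a plain data section is writable but not executable, so jumping into it faults once PAGE_EXEC is actually enforced.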
1
Pass these along to translator_loop -- pc may be used instead
1
Convert all targets simultaneously, as the gen_intermediate_code
2
of tb->pc, and host_pc is currently unused. Adjust all targets
2
function disappears from the target. While there are possible
3
at one time.
3
workarounds, they're larger than simply performing the conversion.
4
4
5
Acked-by: Alistair Francis <alistair.francis@wdc.com>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
7
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
7
---
10
include/exec/exec-all.h | 1 -
8
include/exec/translator.h | 14 --------------
11
include/exec/translator.h | 24 ++++++++++++++++++++----
9
include/hw/core/tcg-cpu-ops.h | 13 +++++++++++++
12
accel/tcg/translate-all.c | 6 ++++--
10
target/alpha/cpu.h | 2 ++
13
accel/tcg/translator.c | 9 +++++----
11
target/arm/internals.h | 2 ++
14
target/alpha/translate.c | 5 +++--
12
target/avr/cpu.h | 2 ++
15
target/arm/translate.c | 5 +++--
13
target/hexagon/cpu.h | 2 ++
16
target/avr/translate.c | 5 +++--
14
target/hppa/cpu.h | 2 ++
17
target/cris/translate.c | 5 +++--
15
target/i386/tcg/helper-tcg.h | 2 ++
18
target/hexagon/translate.c | 6 ++++--
16
target/loongarch/internals.h | 2 ++
19
target/hppa/translate.c | 5 +++--
17
target/m68k/cpu.h | 2 ++
20
target/i386/tcg/translate.c | 5 +++--
18
target/microblaze/cpu.h | 2 ++
21
target/loongarch/translate.c | 6 ++++--
19
target/mips/tcg/tcg-internal.h | 2 ++
22
target/m68k/translate.c | 5 +++--
20
target/openrisc/cpu.h | 2 ++
23
target/microblaze/translate.c | 5 +++--
21
target/ppc/cpu.h | 2 ++
24
target/mips/tcg/translate.c | 5 +++--
22
target/riscv/cpu.h | 3 +++
25
target/nios2/translate.c | 5 +++--
23
target/rx/cpu.h | 2 ++
26
target/openrisc/translate.c | 6 ++++--
24
target/s390x/s390x-internal.h | 2 ++
27
target/ppc/translate.c | 5 +++--
25
target/sh4/cpu.h | 2 ++
28
target/riscv/translate.c | 5 +++--
26
target/sparc/cpu.h | 2 ++
29
target/rx/translate.c | 5 +++--
27
target/tricore/cpu.h | 2 ++
30
target/s390x/tcg/translate.c | 5 +++--
28
target/xtensa/cpu.h | 2 ++
31
target/sh4/translate.c | 5 +++--
29
accel/tcg/cpu-exec.c | 8 +++++---
32
target/sparc/translate.c | 5 +++--
30
accel/tcg/translate-all.c | 8 +++++---
33
target/tricore/translate.c | 6 ++++--
31
target/alpha/cpu.c | 1 +
34
target/xtensa/translate.c | 6 ++++--
32
target/alpha/translate.c | 4 ++--
35
25 files changed, 97 insertions(+), 53 deletions(-)
33
target/arm/cpu.c | 1 +
34
target/arm/tcg/cpu-v7m.c | 1 +
35
target/arm/tcg/translate.c | 5 ++---
36
target/avr/cpu.c | 1 +
37
target/avr/translate.c | 6 +++---
38
target/hexagon/cpu.c | 1 +
39
target/hexagon/translate.c | 4 ++--
40
target/hppa/cpu.c | 1 +
41
target/hppa/translate.c | 4 ++--
42
target/i386/tcg/tcg-cpu.c | 1 +
43
target/i386/tcg/translate.c | 5 ++---
44
target/loongarch/cpu.c | 1 +
45
target/loongarch/tcg/translate.c | 4 ++--
46
target/m68k/cpu.c | 1 +
47
target/m68k/translate.c | 4 ++--
48
target/microblaze/cpu.c | 1 +
49
target/microblaze/translate.c | 4 ++--
50
target/mips/cpu.c | 1 +
51
target/mips/tcg/translate.c | 4 ++--
52
target/openrisc/cpu.c | 1 +
53
target/openrisc/translate.c | 4 ++--
54
target/ppc/cpu_init.c | 1 +
55
target/ppc/translate.c | 4 ++--
56
target/riscv/tcg/tcg-cpu.c | 1 +
57
target/riscv/translate.c | 4 ++--
58
target/rx/cpu.c | 1 +
59
target/rx/translate.c | 4 ++--
60
target/s390x/cpu.c | 1 +
61
target/s390x/tcg/translate.c | 4 ++--
62
target/sh4/cpu.c | 1 +
63
target/sh4/translate.c | 4 ++--
64
target/sparc/cpu.c | 1 +
65
target/sparc/translate.c | 4 ++--
66
target/tricore/cpu.c | 1 +
67
target/tricore/translate.c | 5 ++---
68
target/xtensa/cpu.c | 1 +
69
target/xtensa/translate.c | 4 ++--
70
62 files changed, 121 insertions(+), 62 deletions(-)
36
71
37
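The per-target shape of the conversion, sketched for a hypothetical target "foo" (a schematic fragment, not code from this series; foo_tr_ops, foo_translate_init and DisasContext stand in for the target's real definitions): the global gen_intermediate_code() entry point becomes a method installed in the target's TCGCPUOps, and tb_gen_code() reaches it through cpu->cc->tcg_ops->translate_code.

static void foo_translate_code(CPUState *cs, TranslationBlock *tb,
                               int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cs, tb, max_insns, pc, host_pc, &foo_tr_ops, &dc.base);
}

static const TCGCPUOps foo_tcg_ops = {
    .initialize     = foo_translate_init,
    .translate_code = foo_translate_code,
    /* ...the remaining hooks are unchanged by this series... */
};

Each target hunk below repeats this pattern with its own names.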
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
38
index XXXXXXX..XXXXXXX 100644
39
--- a/include/exec/exec-all.h
40
+++ b/include/exec/exec-all.h
41
@@ -XXX,XX +XXX,XX @@ typedef ram_addr_t tb_page_addr_t;
42
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
43
#endif
44
45
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
46
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
47
target_ulong *data);
48
49
diff --git a/include/exec/translator.h b/include/exec/translator.h
72
diff --git a/include/exec/translator.h b/include/exec/translator.h
50
index XXXXXXX..XXXXXXX 100644
73
index XXXXXXX..XXXXXXX 100644
51
--- a/include/exec/translator.h
74
--- a/include/exec/translator.h
52
+++ b/include/exec/translator.h
75
+++ b/include/exec/translator.h
53
@@ -XXX,XX +XXX,XX @@
76
@@ -XXX,XX +XXX,XX @@
54
#include "exec/translate-all.h"
77
#include "qemu/bswap.h"
55
#include "tcg/tcg.h"
78
#include "exec/vaddr.h"
56
79
57
+/**
80
-/**
58
+ * gen_intermediate_code
81
- * gen_intermediate_code
59
+ * @cpu: cpu context
82
- * @cpu: cpu context
60
+ * @tb: translation block
83
- * @tb: translation block
61
+ * @max_insns: max number of instructions to translate
84
- * @max_insns: max number of instructions to translate
62
+ * @pc: guest virtual program counter address
85
- * @pc: guest virtual program counter address
63
+ * @host_pc: host physical program counter address
86
- * @host_pc: host physical program counter address
64
+ *
87
- *
65
+ * This function must be provided by the target, which should create
88
- * This function must be provided by the target, which should create
66
+ * the target-specific DisasContext, and then invoke translator_loop.
89
- * the target-specific DisasContext, and then invoke translator_loop.
67
+ */
90
- */
68
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
91
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
69
+ target_ulong pc, void *host_pc);
92
- vaddr pc, void *host_pc);
70
93
-
71
/**
94
/**
72
* DisasJumpType:
95
* DisasJumpType:
73
@@ -XXX,XX +XXX,XX @@ typedef struct TranslatorOps {
96
* @DISAS_NEXT: Next instruction in program order.
74
97
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
75
/**
98
index XXXXXXX..XXXXXXX 100644
76
* translator_loop:
99
--- a/include/hw/core/tcg-cpu-ops.h
77
- * @ops: Target-specific operations.
100
+++ b/include/hw/core/tcg-cpu-ops.h
78
- * @db: Disassembly context.
101
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
79
* @cpu: Target vCPU.
102
* Called when the first CPU is realized.
80
* @tb: Translation block.
103
*/
81
* @max_insns: Maximum number of insns to translate.
104
void (*initialize)(void);
82
+ * @pc: guest virtual program counter address
105
+ /**
83
+ * @host_pc: host physical program counter address
106
+ * @translate_code: Translate guest instructions to TCGOps
84
+ * @ops: Target-specific operations.
107
+ * @cpu: cpu context
85
+ * @db: Disassembly context.
108
+ * @tb: translation block
86
*
109
+ * @max_insns: max number of instructions to translate
87
* Generic translator loop.
110
+ * @pc: guest virtual program counter address
88
*
111
+ * @host_pc: host physical program counter address
89
@@ -XXX,XX +XXX,XX @@ typedef struct TranslatorOps {
112
+ *
90
* - When single-stepping is enabled (system-wide or on the current vCPU).
113
+ * This function must be provided by the target, which should create
91
* - When too many instructions have been translated.
114
+ * the target-specific DisasContext, and then invoke translator_loop.
92
*/
115
+ */
93
-void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
116
+ void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
94
- CPUState *cpu, TranslationBlock *tb, int max_insns);
117
+ int *max_insns, vaddr pc, void *host_pc);
95
+void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
118
/**
96
+ target_ulong pc, void *host_pc,
119
* @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
97
+ const TranslatorOps *ops, DisasContextBase *db);
120
*
98
121
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
99
void translator_loop_temp_check(DisasContextBase *db);
122
index XXXXXXX..XXXXXXX 100644
123
--- a/target/alpha/cpu.h
124
+++ b/target/alpha/cpu.h
125
@@ -XXX,XX +XXX,XX @@ enum {
126
};
127
128
void alpha_translate_init(void);
129
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
130
+ int *max_insns, vaddr pc, void *host_pc);
131
132
#define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
133
134
diff --git a/target/arm/internals.h b/target/arm/internals.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/target/arm/internals.h
137
+++ b/target/arm/internals.h
138
@@ -XXX,XX +XXX,XX @@ void init_cpreg_list(ARMCPU *cpu);
139
140
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
141
void arm_translate_init(void);
142
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
143
+ int *max_insns, vaddr pc, void *host_pc);
144
145
void arm_cpu_register_gdb_commands(ARMCPU *cpu);
146
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
147
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
148
index XXXXXXX..XXXXXXX 100644
149
--- a/target/avr/cpu.h
150
+++ b/target/avr/cpu.h
151
@@ -XXX,XX +XXX,XX @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
152
}
153
154
void avr_cpu_tcg_init(void);
155
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
156
+ int *max_insns, vaddr pc, void *host_pc);
157
158
int cpu_avr_exec(CPUState *cpu);
159
160
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
161
index XXXXXXX..XXXXXXX 100644
162
--- a/target/hexagon/cpu.h
163
+++ b/target/hexagon/cpu.h
164
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
165
typedef HexagonCPU ArchCPU;
166
167
void hexagon_translate_init(void);
168
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
169
+ int *max_insns, vaddr pc, void *host_pc);
170
171
#include "exec/cpu-all.h"
172
173
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/hppa/cpu.h
176
+++ b/target/hppa/cpu.h
177
@@ -XXX,XX +XXX,XX @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
178
}
179
180
void hppa_translate_init(void);
181
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
182
+ int *max_insns, vaddr pc, void *host_pc);
183
184
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
185
186
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
187
index XXXXXXX..XXXXXXX 100644
188
--- a/target/i386/tcg/helper-tcg.h
189
+++ b/target/i386/tcg/helper-tcg.h
190
@@ -XXX,XX +XXX,XX @@ static inline target_long lshift(target_long x, int n)
191
192
/* translate.c */
193
void tcg_x86_init(void);
194
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
195
+ int *max_insns, vaddr pc, void *host_pc);
196
197
/* excp_helper.c */
198
G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
199
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
200
index XXXXXXX..XXXXXXX 100644
201
--- a/target/loongarch/internals.h
202
+++ b/target/loongarch/internals.h
203
@@ -XXX,XX +XXX,XX @@
204
#define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)
205
206
void loongarch_translate_init(void);
207
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
208
+ int *max_insns, vaddr pc, void *host_pc);
209
210
void G_NORETURN do_raise_exception(CPULoongArchState *env,
211
uint32_t exception,
212
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
213
index XXXXXXX..XXXXXXX 100644
214
--- a/target/m68k/cpu.h
215
+++ b/target/m68k/cpu.h
216
@@ -XXX,XX +XXX,XX @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
217
int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
218
219
void m68k_tcg_init(void);
220
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
221
+ int *max_insns, vaddr pc, void *host_pc);
222
void m68k_cpu_init_gdb(M68kCPU *cpu);
223
uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
224
void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
225
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
226
index XXXXXXX..XXXXXXX 100644
227
--- a/target/microblaze/cpu.h
228
+++ b/target/microblaze/cpu.h
229
@@ -XXX,XX +XXX,XX @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
230
}
231
232
void mb_tcg_init(void);
233
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
234
+ int *max_insns, vaddr pc, void *host_pc);
235
236
#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU
237
238
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
239
index XXXXXXX..XXXXXXX 100644
240
--- a/target/mips/tcg/tcg-internal.h
241
+++ b/target/mips/tcg/tcg-internal.h
242
@@ -XXX,XX +XXX,XX @@
243
#include "cpu.h"
244
245
void mips_tcg_init(void);
246
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
247
+ int *max_insns, vaddr pc, void *host_pc);
248
249
void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
250
G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
251
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
252
index XXXXXXX..XXXXXXX 100644
253
--- a/target/openrisc/cpu.h
254
+++ b/target/openrisc/cpu.h
255
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
256
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
257
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
258
void openrisc_translate_init(void);
259
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
260
+ int *max_insns, vaddr pc, void *host_pc);
261
int print_insn_or1k(bfd_vma addr, disassemble_info *info);
262
263
#ifndef CONFIG_USER_ONLY
264
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
265
index XXXXXXX..XXXXXXX 100644
266
--- a/target/ppc/cpu.h
267
+++ b/target/ppc/cpu.h
268
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ppc_cpu;
269
270
/*****************************************************************************/
271
void ppc_translate_init(void);
272
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
273
+ int *max_insns, vaddr pc, void *host_pc);
274
275
#if !defined(CONFIG_USER_ONLY)
276
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
277
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
278
index XXXXXXX..XXXXXXX 100644
279
--- a/target/riscv/cpu.h
280
+++ b/target/riscv/cpu.h
281
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
282
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
283
284
void riscv_translate_init(void);
285
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
286
+ int *max_insns, vaddr pc, void *host_pc);
287
+
288
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
289
uint32_t exception, uintptr_t pc);
290
291
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
292
index XXXXXXX..XXXXXXX 100644
293
--- a/target/rx/cpu.h
294
+++ b/target/rx/cpu.h
295
@@ -XXX,XX +XXX,XX @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
296
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
297
298
void rx_translate_init(void);
299
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
300
+ int *max_insns, vaddr pc, void *host_pc);
301
void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
302
303
#include "exec/cpu-all.h"
304
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
305
index XXXXXXX..XXXXXXX 100644
306
--- a/target/s390x/s390x-internal.h
307
+++ b/target/s390x/s390x-internal.h
308
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
309
310
/* translate.c */
311
void s390x_translate_init(void);
312
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
313
+ int *max_insns, vaddr pc, void *host_pc);
314
void s390x_restore_state_to_opc(CPUState *cs,
315
const TranslationBlock *tb,
316
const uint64_t *data);
317
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
318
index XXXXXXX..XXXXXXX 100644
319
--- a/target/sh4/cpu.h
320
+++ b/target/sh4/cpu.h
321
@@ -XXX,XX +XXX,XX @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
322
uintptr_t retaddr);
323
324
void sh4_translate_init(void);
325
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
326
+ int *max_insns, vaddr pc, void *host_pc);
327
328
#if !defined(CONFIG_USER_ONLY)
329
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
330
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
331
index XXXXXXX..XXXXXXX 100644
332
--- a/target/sparc/cpu.h
333
+++ b/target/sparc/cpu.h
334
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
335
336
/* translate.c */
337
void sparc_tcg_init(void);
338
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
339
+ int *max_insns, vaddr pc, void *host_pc);
340
341
/* fop_helper.c */
342
target_ulong cpu_get_fsr(CPUSPARCState *);
343
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
344
index XXXXXXX..XXXXXXX 100644
345
--- a/target/tricore/cpu.h
346
+++ b/target/tricore/cpu.h
347
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)
348
349
void cpu_state_reset(CPUTriCoreState *s);
350
void tricore_tcg_init(void);
351
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
352
+ int *max_insns, vaddr pc, void *host_pc);
353
354
static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
355
uint64_t *cs_base, uint32_t *flags)
356
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
357
index XXXXXXX..XXXXXXX 100644
358
--- a/target/xtensa/cpu.h
359
+++ b/target/xtensa/cpu.h
360
@@ -XXX,XX +XXX,XX @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
361
362
void xtensa_collect_sr_names(const XtensaConfig *config);
363
void xtensa_translate_init(void);
364
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
365
+ int *max_insns, vaddr pc, void *host_pc);
366
void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
367
void xtensa_breakpoint_handler(CPUState *cs);
368
void xtensa_register_core(XtensaConfigList *node);
369
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
370
index XXXXXXX..XXXXXXX 100644
371
--- a/accel/tcg/cpu-exec.c
372
+++ b/accel/tcg/cpu-exec.c
373
@@ -XXX,XX +XXX,XX @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
374
375
if (!tcg_target_initialized) {
376
/* Check mandatory TCGCPUOps handlers */
377
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
378
#ifndef CONFIG_USER_ONLY
379
- assert(cpu->cc->tcg_ops->cpu_exec_halt);
380
- assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
381
+ assert(tcg_ops->cpu_exec_halt);
382
+ assert(tcg_ops->cpu_exec_interrupt);
383
#endif /* !CONFIG_USER_ONLY */
384
- cpu->cc->tcg_ops->initialize();
385
+ assert(tcg_ops->translate_code);
386
+ tcg_ops->initialize();
387
tcg_target_initialized = true;
388
}
100
389
101
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
390
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
102
index XXXXXXX..XXXXXXX 100644
391
index XXXXXXX..XXXXXXX 100644
103
--- a/accel/tcg/translate-all.c
392
--- a/accel/tcg/translate-all.c
104
+++ b/accel/tcg/translate-all.c
393
+++ b/accel/tcg/translate-all.c
105
@@ -XXX,XX +XXX,XX @@
394
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
106
395
107
#include "exec/cputlb.h"
108
#include "exec/translate-all.h"
109
+#include "exec/translator.h"
110
#include "qemu/bitmap.h"
111
#include "qemu/qemu-print.h"
112
#include "qemu/timer.h"
113
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
114
TCGProfile *prof = &tcg_ctx->prof;
115
int64_t ti;
116
#endif
117
+ void *host_pc;
118
119
assert_memory_lock();
120
qemu_thread_jit_write();
121
122
- phys_pc = get_page_addr_code(env, pc);
123
+ phys_pc = get_page_addr_code_hostp(env, pc, &host_pc);
124
125
if (phys_pc == -1) {
126
/* Generate a one-shot TB with 1 insn in it */
127
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
128
tcg_func_start(tcg_ctx);
396
tcg_func_start(tcg_ctx);
129
397
130
tcg_ctx->cpu = env_cpu(env);
398
- tcg_ctx->cpu = env_cpu(env);
131
- gen_intermediate_code(cpu, tb, max_insns);
399
- gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
132
+ gen_intermediate_code(cpu, tb, max_insns, pc, host_pc);
400
+ CPUState *cs = env_cpu(env);
401
+ tcg_ctx->cpu = cs;
402
+ cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);
403
+
133
assert(tb->size != 0);
404
assert(tb->size != 0);
134
tcg_ctx->cpu = NULL;
405
tcg_ctx->cpu = NULL;
135
max_insns = tb->icount;
406
*max_insns = tb->icount;
136
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
407
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
137
index XXXXXXX..XXXXXXX 100644
408
/*
138
--- a/accel/tcg/translator.c
409
* Overflow of code_gen_buffer, or the current slice of it.
139
+++ b/accel/tcg/translator.c
410
*
140
@@ -XXX,XX +XXX,XX @@ static inline void translator_page_protect(DisasContextBase *dcbase,
411
- * TODO: We don't need to re-do gen_intermediate_code, nor
141
#endif
412
+ * TODO: We don't need to re-do tcg_ops->translate_code, nor
142
}
413
* should we re-do the tcg optimization currently hidden
143
414
* inside tcg_gen_code. All that should be required is to
144
-void translator_loop(const TranslatorOps *ops, DisasContextBase *db,
415
* flush the TBs, allocate a new TB, re-initialize it per
145
- CPUState *cpu, TranslationBlock *tb, int max_insns)
416
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
146
+void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
417
index XXXXXXX..XXXXXXX 100644
147
+ target_ulong pc, void *host_pc,
418
--- a/target/alpha/cpu.c
148
+ const TranslatorOps *ops, DisasContextBase *db)
419
+++ b/target/alpha/cpu.c
149
{
420
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
150
uint32_t cflags = tb_cflags(tb);
421
151
bool plugin_enabled;
422
static const TCGCPUOps alpha_tcg_ops = {
152
423
.initialize = alpha_translate_init,
153
/* Initialize DisasContext */
424
+ .translate_code = alpha_translate_code,
154
db->tb = tb;
425
.synchronize_from_tb = alpha_cpu_synchronize_from_tb,
155
- db->pc_first = tb->pc;
426
.restore_state_to_opc = alpha_restore_state_to_opc,
156
- db->pc_next = db->pc_first;
427
157
+ db->pc_first = pc;
158
+ db->pc_next = pc;
159
db->is_jmp = DISAS_NEXT;
160
db->num_insns = 0;
161
db->max_insns = max_insns;
162
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
428
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
163
index XXXXXXX..XXXXXXX 100644
429
index XXXXXXX..XXXXXXX 100644
164
--- a/target/alpha/translate.c
430
--- a/target/alpha/translate.c
165
+++ b/target/alpha/translate.c
431
+++ b/target/alpha/translate.c
166
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
432
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
167
.disas_log = alpha_tr_disas_log,
433
.tb_stop = alpha_tr_tb_stop,
168
};
434
};
169
435
170
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
436
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
171
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
437
- vaddr pc, void *host_pc)
172
+ target_ulong pc, void *host_pc)
438
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
439
+ int *max_insns, vaddr pc, void *host_pc)
173
{
440
{
174
DisasContext dc;
441
DisasContext dc;
175
- translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
442
translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
176
+ translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
443
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
177
}
444
index XXXXXXX..XXXXXXX 100644
178
445
--- a/target/arm/cpu.c
179
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
446
+++ b/target/arm/cpu.c
180
diff --git a/target/arm/translate.c b/target/arm/translate.c
447
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps arm_sysemu_ops = {
181
index XXXXXXX..XXXXXXX 100644
448
#ifdef CONFIG_TCG
182
--- a/target/arm/translate.c
449
static const TCGCPUOps arm_tcg_ops = {
183
+++ b/target/arm/translate.c
450
.initialize = arm_translate_init,
451
+ .translate_code = arm_translate_code,
452
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
453
.debug_excp_handler = arm_debug_excp_handler,
454
.restore_state_to_opc = arm_restore_state_to_opc,
455
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
456
index XXXXXXX..XXXXXXX 100644
457
--- a/target/arm/tcg/cpu-v7m.c
458
+++ b/target/arm/tcg/cpu-v7m.c
459
@@ -XXX,XX +XXX,XX @@ static void cortex_m55_initfn(Object *obj)
460
461
static const TCGCPUOps arm_v7m_tcg_ops = {
462
.initialize = arm_translate_init,
463
+ .translate_code = arm_translate_code,
464
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
465
.debug_excp_handler = arm_debug_excp_handler,
466
.restore_state_to_opc = arm_restore_state_to_opc,
467
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
468
index XXXXXXX..XXXXXXX 100644
469
--- a/target/arm/tcg/translate.c
470
+++ b/target/arm/tcg/translate.c
184
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
471
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
185
};
472
.tb_stop = arm_tr_tb_stop,
186
473
};
187
/* generate intermediate code for basic block 'tb'. */
474
188
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
475
-/* generate intermediate code for basic block 'tb'. */
189
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
476
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
190
+ target_ulong pc, void *host_pc)
477
- vaddr pc, void *host_pc)
478
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
479
+ int *max_insns, vaddr pc, void *host_pc)
191
{
480
{
192
DisasContext dc = { };
481
DisasContext dc = { };
193
const TranslatorOps *ops = &arm_translator_ops;
482
const TranslatorOps *ops = &arm_translator_ops;
194
@@ -XXX,XX +XXX,XX @@ void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
483
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
195
}
484
index XXXXXXX..XXXXXXX 100644
196
#endif
485
--- a/target/avr/cpu.c
197
486
+++ b/target/avr/cpu.c
198
- translator_loop(ops, &dc.base, cpu, tb, max_insns);
487
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {
199
+ translator_loop(cpu, tb, max_insns, pc, host_pc, ops, &dc.base);
488
200
}
489
static const TCGCPUOps avr_tcg_ops = {
201
490
.initialize = avr_cpu_tcg_init,
202
void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
491
+ .translate_code = avr_cpu_translate_code,
492
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
493
.restore_state_to_opc = avr_restore_state_to_opc,
494
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
203
diff --git a/target/avr/translate.c b/target/avr/translate.c
495
diff --git a/target/avr/translate.c b/target/avr/translate.c
204
index XXXXXXX..XXXXXXX 100644
496
index XXXXXXX..XXXXXXX 100644
205
--- a/target/avr/translate.c
497
--- a/target/avr/translate.c
206
+++ b/target/avr/translate.c
498
+++ b/target/avr/translate.c
499
@@ -XXX,XX +XXX,XX @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
500
*
501
* - translate()
502
* - canonicalize_skip()
503
- * - gen_intermediate_code()
504
+ * - translate_code()
505
* - restore_state_to_opc()
506
*
507
*/
207
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
508
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
208
.disas_log = avr_tr_disas_log,
509
.tb_stop = avr_tr_tb_stop,
209
};
510
};
210
511
211
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
512
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
212
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
513
- vaddr pc, void *host_pc)
213
+ target_ulong pc, void *host_pc)
514
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
515
+ int *max_insns, vaddr pc, void *host_pc)
214
{
516
{
215
DisasContext dc = { };
517
DisasContext dc = { };
216
- translator_loop(&avr_tr_ops, &dc.base, cs, tb, max_insns);
518
translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
217
+ translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
519
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
218
}
520
index XXXXXXX..XXXXXXX 100644
219
521
--- a/target/hexagon/cpu.c
220
void restore_state_to_opc(CPUAVRState *env, TranslationBlock *tb,
522
+++ b/target/hexagon/cpu.c
221
diff --git a/target/cris/translate.c b/target/cris/translate.c
523
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)
222
index XXXXXXX..XXXXXXX 100644
524
223
--- a/target/cris/translate.c
525
static const TCGCPUOps hexagon_tcg_ops = {
224
+++ b/target/cris/translate.c
526
.initialize = hexagon_translate_init,
225
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps cris_tr_ops = {
527
+ .translate_code = hexagon_translate_code,
226
.disas_log = cris_tr_disas_log,
528
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
227
};
529
.restore_state_to_opc = hexagon_restore_state_to_opc,
228
530
};
229
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
230
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
231
+ target_ulong pc, void *host_pc)
232
{
233
DisasContext dc;
234
- translator_loop(&cris_tr_ops, &dc.base, cs, tb, max_insns);
235
+ translator_loop(cs, tb, max_insns, pc, host_pc, &cris_tr_ops, &dc.base);
236
}
237
238
void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags)
239
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
531
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
240
index XXXXXXX..XXXXXXX 100644
532
index XXXXXXX..XXXXXXX 100644
241
--- a/target/hexagon/translate.c
533
--- a/target/hexagon/translate.c
242
+++ b/target/hexagon/translate.c
534
+++ b/target/hexagon/translate.c
243
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
535
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
244
.disas_log = hexagon_tr_disas_log,
536
.tb_stop = hexagon_tr_tb_stop,
245
};
537
};
246
538
247
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
539
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
248
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
540
- vaddr pc, void *host_pc)
249
+ target_ulong pc, void *host_pc)
541
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
542
+ int *max_insns, vaddr pc, void *host_pc)
250
{
543
{
251
DisasContext ctx;
544
DisasContext ctx;
252
545
253
- translator_loop(&hexagon_tr_ops, &ctx.base, cs, tb, max_insns);
546
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
254
+ translator_loop(cs, tb, max_insns, pc, host_pc,
547
index XXXXXXX..XXXXXXX 100644
255
+ &hexagon_tr_ops, &ctx.base);
548
--- a/target/hppa/cpu.c
256
}
549
+++ b/target/hppa/cpu.c
257
550
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
258
#define NAME_LEN 64
551
552
static const TCGCPUOps hppa_tcg_ops = {
553
.initialize = hppa_translate_init,
554
+ .translate_code = hppa_translate_code,
555
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
556
.restore_state_to_opc = hppa_restore_state_to_opc,
557
259
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
558
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
260
index XXXXXXX..XXXXXXX 100644
559
index XXXXXXX..XXXXXXX 100644
261
--- a/target/hppa/translate.c
560
--- a/target/hppa/translate.c
262
+++ b/target/hppa/translate.c
561
+++ b/target/hppa/translate.c
263
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
562
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
264
.disas_log = hppa_tr_disas_log,
563
#endif
265
};
564
};
266
565
267
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
566
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
268
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
567
- vaddr pc, void *host_pc)
269
+ target_ulong pc, void *host_pc)
568
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
270
{
569
+ int *max_insns, vaddr pc, void *host_pc)
271
DisasContext ctx;
570
{
272
- translator_loop(&hppa_tr_ops, &ctx.base, cs, tb, max_insns);
571
DisasContext ctx = { };
273
+ translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
572
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
274
}
573
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
275
574
index XXXXXXX..XXXXXXX 100644
276
void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
575
--- a/target/i386/tcg/tcg-cpu.c
576
+++ b/target/i386/tcg/tcg-cpu.c
577
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)
578
579
static const TCGCPUOps x86_tcg_ops = {
580
.initialize = tcg_x86_init,
581
+ .translate_code = x86_translate_code,
582
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
583
.restore_state_to_opc = x86_restore_state_to_opc,
584
.cpu_exec_enter = x86_cpu_exec_enter,
277
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
585
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
278
index XXXXXXX..XXXXXXX 100644
586
index XXXXXXX..XXXXXXX 100644
279
--- a/target/i386/tcg/translate.c
587
--- a/target/i386/tcg/translate.c
280
+++ b/target/i386/tcg/translate.c
588
+++ b/target/i386/tcg/translate.c
281
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
589
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
282
};
590
.tb_stop = i386_tr_tb_stop,
283
591
};
284
/* generate intermediate code for basic block 'tb'. */
592
285
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
593
-/* generate intermediate code for basic block 'tb'. */
286
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
594
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
287
+ target_ulong pc, void *host_pc)
595
- vaddr pc, void *host_pc)
596
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
597
+ int *max_insns, vaddr pc, void *host_pc)
288
{
598
{
289
DisasContext dc;
599
DisasContext dc;
290
600
291
- translator_loop(&i386_tr_ops, &dc.base, cpu, tb, max_insns);
601
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
292
+ translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
602
index XXXXXXX..XXXXXXX 100644
293
}
603
--- a/target/loongarch/cpu.c
294
604
+++ b/target/loongarch/cpu.c
295
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
605
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
296
diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c
606
297
index XXXXXXX..XXXXXXX 100644
607
static const TCGCPUOps loongarch_tcg_ops = {
298
--- a/target/loongarch/translate.c
608
.initialize = loongarch_translate_init,
299
+++ b/target/loongarch/translate.c
609
+ .translate_code = loongarch_translate_code,
610
.synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
611
.restore_state_to_opc = loongarch_restore_state_to_opc,
612
613
diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
614
index XXXXXXX..XXXXXXX 100644
615
--- a/target/loongarch/tcg/translate.c
616
+++ b/target/loongarch/tcg/translate.c
300
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
617
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
301
.disas_log = loongarch_tr_disas_log,
618
.tb_stop = loongarch_tr_tb_stop,
302
};
619
};
303
620
304
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
621
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
305
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
622
- vaddr pc, void *host_pc)
306
+ target_ulong pc, void *host_pc)
623
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
624
+ int *max_insns, vaddr pc, void *host_pc)
307
{
625
{
308
DisasContext ctx;
626
DisasContext ctx;
309
627
310
- translator_loop(&loongarch_tr_ops, &ctx.base, cs, tb, max_insns);
628
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
311
+ translator_loop(cs, tb, max_insns, pc, host_pc,
629
index XXXXXXX..XXXXXXX 100644
312
+ &loongarch_tr_ops, &ctx.base);
630
--- a/target/m68k/cpu.c
313
}
631
+++ b/target/m68k/cpu.c
314
632
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {
315
void loongarch_translate_init(void)
633
634
static const TCGCPUOps m68k_tcg_ops = {
635
.initialize = m68k_tcg_init,
636
+ .translate_code = m68k_translate_code,
637
.restore_state_to_opc = m68k_restore_state_to_opc,
638
639
#ifndef CONFIG_USER_ONLY
316
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
640
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
317
index XXXXXXX..XXXXXXX 100644
641
index XXXXXXX..XXXXXXX 100644
318
--- a/target/m68k/translate.c
642
--- a/target/m68k/translate.c
319
+++ b/target/m68k/translate.c
643
+++ b/target/m68k/translate.c
320
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
644
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
321
.disas_log = m68k_tr_disas_log,
645
.tb_stop = m68k_tr_tb_stop,
322
};
646
};
323
647
324
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
648
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
325
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
649
- vaddr pc, void *host_pc)
326
+ target_ulong pc, void *host_pc)
650
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
651
+ int *max_insns, vaddr pc, void *host_pc)
327
{
652
{
328
DisasContext dc;
653
DisasContext dc;
329
- translator_loop(&m68k_tr_ops, &dc.base, cpu, tb, max_insns);
654
translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
330
+ translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
655
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
331
}
656
index XXXXXXX..XXXXXXX 100644
332
657
--- a/target/microblaze/cpu.c
333
static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
658
+++ b/target/microblaze/cpu.c
659
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {
660
661
static const TCGCPUOps mb_tcg_ops = {
662
.initialize = mb_tcg_init,
663
+ .translate_code = mb_translate_code,
664
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
665
.restore_state_to_opc = mb_restore_state_to_opc,
666
334
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
667
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
335
index XXXXXXX..XXXXXXX 100644
668
index XXXXXXX..XXXXXXX 100644
336
--- a/target/microblaze/translate.c
669
--- a/target/microblaze/translate.c
337
+++ b/target/microblaze/translate.c
670
+++ b/target/microblaze/translate.c
338
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
671
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
.disas_log = mb_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
+ target_ulong pc, void *host_pc)
{
DisasContext dc;
- translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
+ translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
.disas_log = mips_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+ target_ulong pc, void *host_pc)
{
DisasContext ctx;

- translator_loop(&mips_tr_ops, &ctx.base, cs, tb, max_insns);
+ translator_loop(cs, tb, max_insns, pc, host_pc, &mips_tr_ops, &ctx.base);
}

void mips_tcg_init(void)
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/nios2/translate.c
+++ b/target/nios2/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps nios2_tr_ops = {
.disas_log = nios2_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+ target_ulong pc, void *host_pc)
{
DisasContext dc;
- translator_loop(&nios2_tr_ops, &dc.base, cs, tb, max_insns);
+ translator_loop(cs, tb, max_insns, pc, host_pc, &nios2_tr_ops, &dc.base);
}

void nios2_cpu_dump_state(CPUState *cs, FILE *f, int flags)
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
.disas_log = openrisc_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+ target_ulong pc, void *host_pc)
{
DisasContext ctx;

- translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb, max_insns);
+ translator_loop(cs, tb, max_insns, pc, host_pc,
+ &openrisc_tr_ops, &ctx.base);
}

void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
.disas_log = ppc_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+ target_ulong pc, void *host_pc)
{
DisasContext ctx;

- translator_loop(&ppc_tr_ops, &ctx.base, cs, tb, max_insns);
+ translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base);
}

void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb,
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
.disas_log = riscv_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+ target_ulong pc, void *host_pc)
{
DisasContext ctx;

- translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
+ translator_loop(cs, tb, max_insns, pc, host_pc, &riscv_tr_ops, &ctx.base);
}

void riscv_translate_init(void)
diff --git a/target/rx/translate.c b/target/rx/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
.disas_log = rx_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+ target_ulong pc, void *host_pc)
{
DisasContext dc;

- translator_loop(&rx_tr_ops, &dc.base, cs, tb, max_insns);
+ translator_loop(cs, tb, max_insns, pc, host_pc, &rx_tr_ops, &dc.base);
}

void restore_state_to_opc(CPURXState *env, TranslationBlock *tb,
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
.disas_log = s390x_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+ target_ulong pc, void *host_pc)
{
DisasContext dc;

- translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
+ translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
}

void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
.disas_log = sh4_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+ target_ulong pc, void *host_pc)
{
DisasContext ctx;

- translator_loop(&sh4_tr_ops, &ctx.base, cs, tb, max_insns);
+ translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base);
}

void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
.disas_log = sparc_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+ target_ulong pc, void *host_pc)
{
DisasContext dc = {};

- translator_loop(&sparc_tr_ops, &dc.base, cs, tb, max_insns);
+ translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}

void sparc_tcg_init(void)
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
};


-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
+ target_ulong pc, void *host_pc)
{
DisasContext ctx;
- translator_loop(&tricore_tr_ops, &ctx.base, cs, tb, max_insns);
+ translator_loop(cs, tb, max_insns, pc, host_pc,
+ &tricore_tr_ops, &ctx.base);
}

void
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
.disas_log = xtensa_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
+ target_ulong pc, void *host_pc)
{
DisasContext dc = {};
- translator_loop(&xtensa_translator_ops, &dc.base, cpu, tb, max_insns);
+ translator_loop(cpu, tb, max_insns, pc, host_pc,
+ &xtensa_translator_ops, &dc.base);
}

void xtensa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
--
2.34.1

.tb_stop = mb_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static const Property mips_cpu_properties[] = {
#include "hw/core/tcg-cpu-ops.h"
static const TCGCPUOps mips_tcg_ops = {
.initialize = mips_tcg_init,
+ .translate_code = mips_translate_code,
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
.restore_state_to_opc = mips_restore_state_to_opc,

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
.tb_stop = mips_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {

static const TCGCPUOps openrisc_tcg_ops = {
.initialize = openrisc_translate_init,
+ .translate_code = openrisc_translate_code,
.synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
.restore_state_to_opc = openrisc_restore_state_to_opc,

diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
.tb_stop = openrisc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {

static const TCGCPUOps ppc_tcg_ops = {
.initialize = ppc_translate_init,
+ .translate_code = ppc_translate_code,
.restore_state_to_opc = ppc_restore_state_to_opc,

#ifdef CONFIG_USER_ONLY
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
.tb_stop = ppc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_restore_state_to_opc(CPUState *cs,

static const TCGCPUOps riscv_tcg_ops = {
.initialize = riscv_translate_init,
+ .translate_code = riscv_translate_code,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
.restore_state_to_opc = riscv_restore_state_to_opc,

diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
.tb_stop = riscv_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {

static const TCGCPUOps rx_tcg_ops = {
.initialize = rx_translate_init,
+ .translate_code = rx_translate_code,
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
.restore_state_to_opc = rx_restore_state_to_opc,
.tlb_fill = rx_cpu_tlb_fill,
diff --git a/target/rx/translate.c b/target/rx/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
.tb_stop = rx_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,

static const TCGCPUOps s390_tcg_ops = {
.initialize = s390x_translate_init,
+ .translate_code = s390x_translate_code,
.restore_state_to_opc = s390x_restore_state_to_opc,

#ifdef CONFIG_USER_ONLY
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
.disas_log = s390x_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {

static const TCGCPUOps superh_tcg_ops = {
.initialize = sh4_translate_init,
+ .translate_code = sh4_translate_code,
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
.restore_state_to_opc = superh_restore_state_to_opc,

diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
.tb_stop = sh4_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {

static const TCGCPUOps sparc_tcg_ops = {
.initialize = sparc_tcg_init,
+ .translate_code = sparc_translate_code,
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
.restore_state_to_opc = sparc_restore_state_to_opc,

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
.tb_stop = sparc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = {};

diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {

static const TCGCPUOps tricore_tcg_ops = {
.initialize = tricore_tcg_init,
+ .translate_code = tricore_translate_code,
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
.restore_state_to_opc = tricore_restore_state_to_opc,
.tlb_fill = tricore_cpu_tlb_fill,
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
.tb_stop = tricore_tr_tb_stop,
};

-
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
translator_loop(cs, tb, max_insns, pc, host_pc,
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {

static const TCGCPUOps xtensa_tcg_ops = {
.initialize = xtensa_translate_init,
+ .translate_code = xtensa_translate_code,
.debug_excp_handler = xtensa_breakpoint_handler,
.restore_state_to_opc = xtensa_restore_state_to_opc,

diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
.tb_stop = xtensa_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = {};
translator_loop(cpu, tb, max_insns, pc, host_pc,
--
2.43.0