The following changes since commit aa3a285b5bc56a4208b3b57d4a55291e9c260107:

  Merge tag 'mem-2024-12-21' of https://github.com/davidhildenbrand/qemu into staging (2024-12-22 14:33:27 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241224

for you to fetch changes up to e4a8e093dc74be049f4829831dce76e5edab0003:

  accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core (2024-12-24 08:32:15 -0800)

----------------------------------------------------------------
tcg/optimize: Remove in-flight mask data from OptContext
fpu: Add float*_muladd_scalbn
fpu: Remove float_muladd_halve_result
fpu: Add float_round_nearest_even_max
fpu: Add float_muladd_suppress_add_product_zero
target/hexagon: Use float32_muladd
accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

----------------------------------------------------------------
Ilya Leoshkevich (1):
      tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

Pierrick Bouvier (1):
      plugins: optimize cpu_index code generation

Richard Henderson (70):
      tcg/optimize: Split out finish_bb, finish_ebb
      tcg/optimize: Split out fold_affected_mask
      tcg/optimize: Copy mask writeback to fold_masks
      tcg/optimize: Split out fold_masks_zs
      tcg/optimize: Augment s_mask from z_mask in fold_masks_zs
      tcg/optimize: Change representation of s_mask
      tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2
      tcg/optimize: Introduce const value accessors for TempOptInfo
      tcg/optimize: Use fold_masks_zs in fold_and
      tcg/optimize: Use fold_masks_zs in fold_andc
      tcg/optimize: Use fold_masks_zs in fold_bswap
      tcg/optimize: Use fold_masks_zs in fold_count_zeros
      tcg/optimize: Use fold_masks_z in fold_ctpop
      tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
      tcg/optimize: Compute sign mask in fold_deposit
      tcg/optimize: Use finish_folding in fold_divide
      tcg/optimize: Use finish_folding in fold_dup, fold_dup2
      tcg/optimize: Use fold_masks_s in fold_eqv
      tcg/optimize: Use fold_masks_z in fold_extract
      tcg/optimize: Use finish_folding in fold_extract2
      tcg/optimize: Use fold_masks_zs in fold_exts
      tcg/optimize: Use fold_masks_z in fold_extu
      tcg/optimize: Use fold_masks_zs in fold_movcond
      tcg/optimize: Use finish_folding in fold_mul*
      tcg/optimize: Use fold_masks_s in fold_nand
      tcg/optimize: Use fold_masks_z in fold_neg_no_const
      tcg/optimize: Use fold_masks_s in fold_nor
      tcg/optimize: Use fold_masks_s in fold_not
      tcg/optimize: Use fold_masks_zs in fold_or
      tcg/optimize: Use fold_masks_zs in fold_orc
      tcg/optimize: Use fold_masks_zs in fold_qemu_ld
      tcg/optimize: Return true from fold_qemu_st, fold_tcg_st
      tcg/optimize: Use finish_folding in fold_remainder
      tcg/optimize: Distinguish simplification in fold_setcond_zmask
      tcg/optimize: Use fold_masks_z in fold_setcond
      tcg/optimize: Use fold_masks_s in fold_negsetcond
      tcg/optimize: Use fold_masks_z in fold_setcond2
      tcg/optimize: Use finish_folding in fold_cmp_vec
      tcg/optimize: Use finish_folding in fold_cmpsel_vec
      tcg/optimize: Use fold_masks_zs in fold_sextract
      tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
      tcg/optimize: Simplify sign bit test in fold_shift
      tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec
      tcg/optimize: Use fold_masks_zs in fold_tcg_ld
      tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy
      tcg/optimize: Use fold_masks_zs in fold_xor
      tcg/optimize: Use finish_folding in fold_bitsel_vec
      tcg/optimize: Use finish_folding as default in tcg_optimize
      tcg/optimize: Remove z_mask, s_mask from OptContext
      tcg/optimize: Re-enable sign-mask optimizations
      tcg/optimize: Move fold_bitsel_vec into alphabetic sort
      tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
      softfloat: Add float{16,32,64}_muladd_scalbn
      target/arm: Use float*_muladd_scalbn
      target/sparc: Use float*_muladd_scalbn
      softfloat: Remove float_muladd_halve_result
      softfloat: Add float_round_nearest_even_max
      softfloat: Add float_muladd_suppress_add_product_zero
      target/hexagon: Use float32_mul in helper_sfmpy
      target/hexagon: Use float32_muladd for helper_sffma
      target/hexagon: Use float32_muladd for helper_sffms
      target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc
      target/hexagon: Use float32_muladd for helper_sffm[as]_lib
      target/hexagon: Remove internal_fmafx
      target/hexagon: Expand GEN_XF_ROUND
      target/hexagon: Remove Float
      target/hexagon: Remove Double
      target/hexagon: Use mulu64 for int128_mul_6464
      target/hexagon: Simplify internal_mpyhh setup
      accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

 include/exec/translator.h | 14 -
 include/fpu/softfloat-types.h | 2 +
 include/fpu/softfloat.h | 14 +-
 include/hw/core/tcg-cpu-ops.h | 13 +
 target/alpha/cpu.h | 2 +
 target/arm/internals.h | 2 +
 target/avr/cpu.h | 2 +
 target/hexagon/cpu.h | 2 +
 target/hexagon/fma_emu.h | 3 -
 target/hppa/cpu.h | 2 +
 target/i386/tcg/helper-tcg.h | 2 +
 target/loongarch/internals.h | 2 +
 target/m68k/cpu.h | 2 +
 target/microblaze/cpu.h | 2 +
 target/mips/tcg/tcg-internal.h | 2 +
 target/openrisc/cpu.h | 2 +
 target/ppc/cpu.h | 2 +
 target/riscv/cpu.h | 3 +
 target/rx/cpu.h | 2 +
 target/s390x/s390x-internal.h | 2 +
 target/sh4/cpu.h | 2 +
 target/sparc/cpu.h | 2 +
 target/sparc/helper.h | 4 +-
 target/tricore/cpu.h | 2 +
 target/xtensa/cpu.h | 2 +
 accel/tcg/cpu-exec.c | 8 +-
 accel/tcg/plugin-gen.c | 9 +
 accel/tcg/translate-all.c | 8 +-
 fpu/softfloat.c | 63 +--
 target/alpha/cpu.c | 1 +
 target/alpha/translate.c | 4 +-
 target/arm/cpu.c | 1 +
 target/arm/tcg/cpu-v7m.c | 1 +
 target/arm/tcg/helper-a64.c | 6 +-
 target/arm/tcg/translate.c | 5 +-
 target/avr/cpu.c | 1 +
 target/avr/translate.c | 6 +-
 target/hexagon/cpu.c | 1 +
 target/hexagon/fma_emu.c | 496 ++++++---------------
 target/hexagon/op_helper.c | 125 ++----
 target/hexagon/translate.c | 4 +-
 target/hppa/cpu.c | 1 +
 target/hppa/translate.c | 4 +-
 target/i386/tcg/tcg-cpu.c | 1 +
 target/i386/tcg/translate.c | 5 +-
 target/loongarch/cpu.c | 1 +
 target/loongarch/tcg/translate.c | 4 +-
 target/m68k/cpu.c | 1 +
 target/m68k/translate.c | 4 +-
 target/microblaze/cpu.c | 1 +
 target/microblaze/translate.c | 4 +-
 target/mips/cpu.c | 1 +
 target/mips/tcg/translate.c | 4 +-
 target/openrisc/cpu.c | 1 +
 target/openrisc/translate.c | 4 +-
 target/ppc/cpu_init.c | 1 +
 target/ppc/translate.c | 4 +-
 target/riscv/tcg/tcg-cpu.c | 1 +
 target/riscv/translate.c | 4 +-
 target/rx/cpu.c | 1 +
 target/rx/translate.c | 4 +-
 target/s390x/cpu.c | 1 +
 target/s390x/tcg/translate.c | 4 +-
 target/sh4/cpu.c | 1 +
 target/sh4/translate.c | 4 +-
 target/sparc/cpu.c | 1 +
 target/sparc/fop_helper.c | 8 +-
 target/sparc/translate.c | 84 ++--
 target/tricore/cpu.c | 1 +
 target/tricore/translate.c | 5 +-
 target/xtensa/cpu.c | 1 +
 target/xtensa/translate.c | 4 +-
 tcg/optimize.c | 857 +++++++++++++++++++-----------------
 tests/tcg/multiarch/system/memory.c | 9 +-
 fpu/softfloat-parts.c.inc | 16 +-
 75 files changed, 866 insertions(+), 1009 deletions(-)

From: Ilya Leoshkevich <iii@linux.ibm.com>

make check-tcg fails on Fedora with the following error message:

  alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
  qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
     17 | #include <inttypes.h>
        |          ^~~~~~~~~~~~
  compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tests/tcg/multiarch/system/memory.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -XXX,XX +XXX,XX @@

 #include <stdint.h>
 #include <stdbool.h>
-#include <inttypes.h>
 #include <minilib.h>

 #ifndef CHECK_UNALIGNED
@@ -XXX,XX +XXX,XX @@ int main(void)
     int i;
     bool ok = true;

-    ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
-    ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+    ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+    ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);

     /* Run through the unsigned tests first */
     for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -XXX,XX +XXX,XX @@ int main(void)
         ok = do_signed_reads(true);
     }

-    ml_printf("Test data read: %"PRId32"\n", test_read_count);
-    ml_printf("Test data write: %"PRId32"\n", test_write_count);
+    ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+    ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
     ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
     return ok ? 0 : -1;
 }
--
2.43.0

From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.
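
For context, the main beneficiaries are TCG plugins that key per-vCPU state
off cpu_index through a scoreboard. The sketch below is illustrative only and
is not part of this patch; it assumes the scoreboard and per-vCPU inline-op
entry points declared in recent qemu-plugin.h, so names and signatures should
be checked against the installed headers.

/*
 * Illustrative sketch, not part of this patch: a minimal plugin whose
 * per-vCPU counter is indexed by cpu_index via a scoreboard.  With a
 * single vcpu, the constant cpu_index lets the inline add below target
 * a fixed scoreboard slot.  API names follow recent qemu-plugin.h and
 * should be verified against the headers in use.
 */
#include <inttypes.h>
#include <glib.h>
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

static struct qemu_plugin_scoreboard *counts;
static qemu_plugin_u64 tb_count;

static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    /* One inline add per TB execution, indexed by the executing vCPU. */
    qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
        tb, QEMU_PLUGIN_INLINE_ADD_U64, tb_count, 1);
}

static void plugin_exit(qemu_plugin_id_t id, void *p)
{
    g_autofree gchar *msg =
        g_strdup_printf("TBs executed: %" PRIu64 "\n",
                        qemu_plugin_u64_sum(tb_count));
    qemu_plugin_outs(msg);
    qemu_plugin_scoreboard_free(counts);
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    counts = qemu_plugin_scoreboard_new(sizeof(uint64_t));
    tb_count = qemu_plugin_scoreboard_u64(counts);
    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
    return 0;
}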

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
 accel/tcg/plugin-gen.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void gen_disable_mem_helper(void)

 static TCGv_i32 gen_cpu_index(void)
 {
+    /*
+     * Optimize when we run with a single vcpu. All values using cpu_index,
+     * including scoreboard index, will be optimized out.
+     * User-mode calls tb_flush when setting this flag. In system-mode, all
+     * vcpus are created before generating code.
+     */
+    if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+        return tcg_constant_i32(current_cpu->cpu_index);
+    }
     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
     tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
--
2.43.0
diff view generated by jsdifflib
1
We can delay the computation of haddr until the plugin
1
Call them directly from the opcode switch statement in tcg_optimize,
2
actually requests it.
2
rather than in finish_folding based on opcode flags. Adjust folding
3
of conditional branches to match.
3
4
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
---
7
include/qemu/plugin.h | 4 ----
8
tcg/optimize.c | 47 +++++++++++++++++++++++++++++++----------------
8
accel/tcg/plugin-gen.c | 20 --------------------
9
1 file changed, 31 insertions(+), 16 deletions(-)
9
plugins/api.c | 25 ++++++++++++++++++++++++-
10
3 files changed, 24 insertions(+), 25 deletions(-)
11
10
12
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
14
--- a/include/qemu/plugin.h
13
--- a/tcg/optimize.c
15
+++ b/include/qemu/plugin.h
14
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_dyn_cb {
15
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
17
/* Internal context for instrumenting an instruction */
16
}
18
struct qemu_plugin_insn {
17
}
19
uint64_t vaddr;
18
20
- void *haddr;
19
+static void finish_bb(OptContext *ctx)
21
GArray *insn_cbs;
20
+{
22
GArray *mem_cbs;
21
+ /* We only optimize memory barriers across basic blocks. */
23
uint8_t len;
22
+ ctx->prev_mb = NULL;
24
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_tb {
23
+}
25
GPtrArray *insns;
24
+
26
size_t n;
25
+static void finish_ebb(OptContext *ctx)
27
uint64_t vaddr;
26
+{
28
- uint64_t vaddr2;
27
+ finish_bb(ctx);
29
- void *haddr1;
28
+ /* We only optimize across extended basic blocks. */
30
- void *haddr2;
29
+ memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
31
30
+ remove_mem_copy_all(ctx);
32
/* if set, the TB calls helpers that might access guest memory */
31
+}
33
bool mem_helper;
32
+
34
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
33
static void finish_folding(OptContext *ctx, TCGOp *op)
35
index XXXXXXX..XXXXXXX 100644
34
{
36
--- a/accel/tcg/plugin-gen.c
35
const TCGOpDef *def = &tcg_op_defs[op->opc];
37
+++ b/accel/tcg/plugin-gen.c
36
int i, nb_oargs;
38
@@ -XXX,XX +XXX,XX @@ bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db)
39
ret = true;
40
41
ptb->vaddr = db->pc_first;
42
- ptb->vaddr2 = -1;
43
- ptb->haddr1 = db->host_addr[0];
44
- ptb->haddr2 = NULL;
45
ptb->mem_helper = false;
46
47
tcg_gen_plugin_cb(PLUGIN_GEN_FROM_TB);
48
@@ -XXX,XX +XXX,XX @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
49
pc = db->pc_next;
50
insn->vaddr = pc;
51
37
52
- /*
38
- /*
53
- * Detect page crossing to get the new host address.
39
- * We only optimize extended basic blocks. If the opcode ends a BB
54
- * Note that we skip this when haddr1 == NULL, e.g. when we're
40
- * and is not a conditional branch, reset all temp data.
55
- * fetching instructions from a region not backed by RAM.
56
- */
41
- */
57
- if (ptb->haddr1 == NULL) {
42
- if (def->flags & TCG_OPF_BB_END) {
58
- insn->haddr = NULL;
43
- ctx->prev_mb = NULL;
59
- } else if (is_same_page(db, db->pc_next)) {
44
- if (!(def->flags & TCG_OPF_COND_BRANCH)) {
60
- insn->haddr = ptb->haddr1 + pc - ptb->vaddr;
45
- memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
61
- } else {
46
- remove_mem_copy_all(ctx);
62
- if (ptb->vaddr2 == -1) {
63
- ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
64
- get_page_addr_code_hostp(cpu_env(cpu), ptb->vaddr2, &ptb->haddr2);
65
- }
47
- }
66
- insn->haddr = ptb->haddr2 + pc - ptb->vaddr2;
48
- return;
67
- }
49
- }
68
-
50
-
69
tcg_gen_plugin_cb(PLUGIN_GEN_FROM_INSN);
51
nb_oargs = def->nb_oargs;
52
for (i = 0; i < nb_oargs; i++) {
53
TCGTemp *ts = arg_temp(op->args[i]);
54
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
55
if (i > 0) {
56
op->opc = INDEX_op_br;
57
op->args[0] = op->args[3];
58
+ finish_ebb(ctx);
59
+ } else {
60
+ finish_bb(ctx);
61
}
62
- return false;
63
+ return true;
70
}
64
}
71
65
72
diff --git a/plugins/api.c b/plugins/api.c
66
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
73
index XXXXXXX..XXXXXXX 100644
67
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
74
--- a/plugins/api.c
68
}
75
+++ b/plugins/api.c
69
op->opc = INDEX_op_br;
76
@@ -XXX,XX +XXX,XX @@ uint64_t qemu_plugin_insn_vaddr(const struct qemu_plugin_insn *insn)
70
op->args[0] = label;
77
71
- break;
78
void *qemu_plugin_insn_haddr(const struct qemu_plugin_insn *insn)
72
+ finish_ebb(ctx);
79
{
73
+ return true;
80
- return insn->haddr;
74
}
81
+ const DisasContextBase *db = tcg_ctx->plugin_db;
75
- return false;
82
+ vaddr page0_last = db->pc_first | ~TARGET_PAGE_MASK;
83
+
76
+
84
+ if (db->fake_insn) {
77
+ finish_bb(ctx);
85
+ return NULL;
78
+ return true;
86
+ }
87
+
88
+ /*
89
+ * ??? The return value is not intended for use of host memory,
90
+ * but as a proxy for address space and physical address.
91
+ * Thus we are only interested in the first byte and do not
92
+ * care about spanning pages.
93
+ */
94
+ if (insn->vaddr <= page0_last) {
95
+ if (db->host_addr[0] == NULL) {
96
+ return NULL;
97
+ }
98
+ return db->host_addr[0] + insn->vaddr - db->pc_first;
99
+ } else {
100
+ if (db->host_addr[1] == NULL) {
101
+ return NULL;
102
+ }
103
+ return db->host_addr[1] + insn->vaddr - (page0_last + 1);
104
+ }
105
}
79
}
106
80
107
char *qemu_plugin_insn_disas(const struct qemu_plugin_insn *insn)
81
static bool fold_bswap(OptContext *ctx, TCGOp *op)
82
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
83
CASE_OP_32_64_VEC(xor):
84
done = fold_xor(&ctx, op);
85
break;
86
+ case INDEX_op_set_label:
87
+ case INDEX_op_br:
88
+ case INDEX_op_exit_tb:
89
+ case INDEX_op_goto_tb:
90
+ case INDEX_op_goto_ptr:
91
+ finish_ebb(&ctx);
92
+ done = true;
93
+ break;
94
default:
95
break;
96
}
108
--
97
--
109
2.34.1
98
2.43.0
diff view generated by jsdifflib
New patch
1
There are only a few logical operations which can compute
2
an "affected" mask. Split out handling of this optimization
3
to a separate function, only to be called when applicable.
1
4
5
Remove the a_mask field from OptContext, as the mask is
6
no longer stored anywhere.
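
To illustrate the test itself (a standalone sketch, not QEMU code): for an
AND against a constant, the affected mask is the set of bits that may still
be 1 in the first input but are cleared by the constant; when that mask is
zero, the AND provably returns its first input unchanged and can be folded
to a copy.

/* Standalone illustration, not QEMU code: "affected" bits for AND. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * z1 is the possibly-nonzero bits of operand a (QEMU's z_mask): a bit
 * that is 0 in z1 is guaranteed to be 0 in a.  c is a constant second
 * operand.  The affected mask z1 & ~c covers bits that may be 1 in a
 * but are cleared by c; if it is 0, then (a & c) == a for every a
 * compatible with z1.
 */
static uint64_t affected_and(uint64_t z1, uint64_t c)
{
    return z1 & ~c;
}

int main(void)
{
    uint64_t z1 = 0x00ff;   /* only the low 8 bits of a may be non-zero */

    assert(affected_and(z1, 0xffff) == 0);      /* AND is a copy of a */
    assert(affected_and(z1, 0x000f) == 0x00f0); /* AND may clear live bits */

    printf("affected(a & 0x000f) = %#llx\n",
           (unsigned long long)affected_and(z1, 0x000f));
    return 0;
}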
7
8
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
12
1 file changed, 27 insertions(+), 15 deletions(-)
13
14
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/optimize.c
17
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
19
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
20
21
/* In flight values from optimization. */
22
- uint64_t a_mask; /* mask bit is 0 iff value identical to first input */
23
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
24
uint64_t s_mask; /* mask of clrsb(value) bits */
25
TCGType type;
26
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
27
28
static bool fold_masks(OptContext *ctx, TCGOp *op)
29
{
30
- uint64_t a_mask = ctx->a_mask;
31
uint64_t z_mask = ctx->z_mask;
32
uint64_t s_mask = ctx->s_mask;
33
34
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
35
* type changing opcodes.
36
*/
37
if (ctx->type == TCG_TYPE_I32) {
38
- a_mask = (int32_t)a_mask;
39
z_mask = (int32_t)z_mask;
40
s_mask |= MAKE_64BIT_MASK(32, 32);
41
ctx->z_mask = z_mask;
42
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
43
if (z_mask == 0) {
44
return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
45
}
46
+ return false;
47
+}
48
+
49
+/*
50
+ * An "affected" mask bit is 0 if and only if the result is identical
51
+ * to the first input. Thus if the entire mask is 0, the operation
52
+ * is equivalent to a copy.
53
+ */
54
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
55
+{
56
+ if (ctx->type == TCG_TYPE_I32) {
57
+ a_mask = (uint32_t)a_mask;
58
+ }
59
if (a_mask == 0) {
60
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
61
}
62
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
63
* Known-zeros does not imply known-ones. Therefore unless
64
* arg2 is constant, we can't infer affected bits from it.
65
*/
66
- if (arg_is_const(op->args[2])) {
67
- ctx->a_mask = z1 & ~z2;
68
+ if (arg_is_const(op->args[2]) &&
69
+ fold_affected_mask(ctx, op, z1 & ~z2)) {
70
+ return true;
71
}
72
73
return fold_masks(ctx, op);
74
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
75
*/
76
if (arg_is_const(op->args[2])) {
77
uint64_t z2 = ~arg_info(op->args[2])->z_mask;
78
- ctx->a_mask = z1 & ~z2;
79
+ if (fold_affected_mask(ctx, op, z1 & ~z2)) {
80
+ return true;
81
+ }
82
z1 &= z2;
83
}
84
ctx->z_mask = z1;
85
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
86
87
z_mask_old = arg_info(op->args[1])->z_mask;
88
z_mask = extract64(z_mask_old, pos, len);
89
- if (pos == 0) {
90
- ctx->a_mask = z_mask_old ^ z_mask;
91
+ if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
92
+ return true;
93
}
94
ctx->z_mask = z_mask;
95
ctx->s_mask = smask_from_zmask(z_mask);
96
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
97
98
ctx->z_mask = z_mask;
99
ctx->s_mask = s_mask;
100
- if (!type_change) {
101
- ctx->a_mask = s_mask & ~s_mask_old;
102
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
103
+ return true;
104
}
105
106
return fold_masks(ctx, op);
107
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
108
109
ctx->z_mask = z_mask;
110
ctx->s_mask = smask_from_zmask(z_mask);
111
- if (!type_change) {
112
- ctx->a_mask = z_mask_old ^ z_mask;
113
+ if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
114
+ return true;
115
}
116
return fold_masks(ctx, op);
117
}
118
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
119
s_mask |= MAKE_64BIT_MASK(len, 64 - len);
120
ctx->s_mask = s_mask;
121
122
- if (pos == 0) {
123
- ctx->a_mask = s_mask & ~s_mask_old;
124
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
125
+ return true;
126
}
127
128
return fold_masks(ctx, op);
129
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
130
}
131
132
/* Assume all bits affected, no bits known zero, no sign reps. */
133
- ctx.a_mask = -1;
134
ctx.z_mask = -1;
135
ctx.s_mask = 0;
136
137
--
138
2.43.0
diff view generated by jsdifflib
New patch
1
Use of fold_masks should be restricted to those opcodes that
2
can reliably make use of it -- those with a single output,
3
and from higher-level folders that set up the masks.
4
Prepare for conversion of each folder in turn.
1
5
6
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
tcg/optimize.c | 17 ++++++++++++++---
10
1 file changed, 14 insertions(+), 3 deletions(-)
11
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/optimize.c
15
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
17
{
18
uint64_t z_mask = ctx->z_mask;
19
uint64_t s_mask = ctx->s_mask;
20
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
21
+ TCGTemp *ts;
22
+ TempOptInfo *ti;
23
+
24
+ /* Only single-output opcodes are supported here. */
25
+ tcg_debug_assert(def->nb_oargs == 1);
26
27
/*
28
* 32-bit ops generate 32-bit results, which for the purpose of
29
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
30
if (ctx->type == TCG_TYPE_I32) {
31
z_mask = (int32_t)z_mask;
32
s_mask |= MAKE_64BIT_MASK(32, 32);
33
- ctx->z_mask = z_mask;
34
- ctx->s_mask = s_mask;
35
}
36
37
if (z_mask == 0) {
38
return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
39
}
40
- return false;
41
+
42
+ ts = arg_temp(op->args[0]);
43
+ reset_ts(ctx, ts);
44
+
45
+ ti = ts_info(ts);
46
+ ti->z_mask = z_mask;
47
+ ti->s_mask = s_mask;
48
+ return true;
49
}
50
51
/*
52
--
53
2.43.0
diff view generated by jsdifflib
New patch
1
Add a routine to which masks can be passed directly, rather than
2
storing them into OptContext. To be used in upcoming patches.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 15 ++++++++++++---
8
1 file changed, 12 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
15
return fold_const2(ctx, op);
16
}
17
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
19
+/*
20
+ * Record "zero" and "sign" masks for the single output of @op.
21
+ * See TempOptInfo definition of z_mask and s_mask.
22
+ * If z_mask allows, fold the output to constant zero.
23
+ */
24
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
25
+ uint64_t z_mask, uint64_t s_mask)
26
{
27
- uint64_t z_mask = ctx->z_mask;
28
- uint64_t s_mask = ctx->s_mask;
29
const TCGOpDef *def = &tcg_op_defs[op->opc];
30
TCGTemp *ts;
31
TempOptInfo *ti;
32
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
33
return true;
34
}
35
36
+static bool fold_masks(OptContext *ctx, TCGOp *op)
37
+{
38
+ return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
39
+}
40
+
41
/*
42
* An "affected" mask bit is 0 if and only if the result is identical
43
* to the first input. Thus if the entire mask is 0, the operation
44
--
45
2.43.0
diff view generated by jsdifflib
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Consider the passed s_mask to be a minimum deduced from
2
either existing s_mask or from a sign-extension operation.
3
We may be able to deduce more from the set of known zeros.
4
Remove identical logic from several opcode folders.
5
6
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
8
---
4
include/exec/translator.h | 21 +++++++++------------
9
tcg/optimize.c | 21 ++++++---------------
5
accel/tcg/translator.c | 15 ++++++++-------
10
1 file changed, 6 insertions(+), 15 deletions(-)
6
target/hexagon/translate.c | 1 +
7
target/microblaze/translate.c | 1 +
8
4 files changed, 19 insertions(+), 19 deletions(-)
9
11
10
diff --git a/include/exec/translator.h b/include/exec/translator.h
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
12
--- a/include/exec/translator.h
14
--- a/tcg/optimize.c
13
+++ b/include/exec/translator.h
15
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
17
* Record "zero" and "sign" masks for the single output of @op.
18
* See TempOptInfo definition of z_mask and s_mask.
19
* If z_mask allows, fold the output to constant zero.
20
+ * The passed s_mask may be augmented by z_mask.
15
*/
21
*/
16
22
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
17
#include "qemu/bswap.h"
23
uint64_t z_mask, uint64_t s_mask)
18
-#include "exec/cpu-common.h"
24
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
19
-#include "exec/cpu-defs.h"
25
20
-#include "exec/abi_ptr.h"
26
ti = ts_info(ts);
21
-#include "cpu.h"
27
ti->z_mask = z_mask;
22
+#include "exec/vaddr.h"
28
- ti->s_mask = s_mask;
23
29
+ ti->s_mask = s_mask | smask_from_zmask(z_mask);
24
/**
30
return true;
25
* gen_intermediate_code
26
@@ -XXX,XX +XXX,XX @@ bool translator_io_start(DisasContextBase *db);
27
* the relevant information at translation time.
28
*/
29
30
-uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
31
-uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
32
-uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
33
-uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
34
+uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc);
35
+uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, vaddr pc);
36
+uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, vaddr pc);
37
+uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc);
38
39
static inline uint16_t
40
translator_lduw_swap(CPUArchState *env, DisasContextBase *db,
41
- abi_ptr pc, bool do_swap)
42
+ vaddr pc, bool do_swap)
43
{
44
uint16_t ret = translator_lduw(env, db, pc);
45
if (do_swap) {
46
@@ -XXX,XX +XXX,XX @@ translator_lduw_swap(CPUArchState *env, DisasContextBase *db,
47
48
static inline uint32_t
49
translator_ldl_swap(CPUArchState *env, DisasContextBase *db,
50
- abi_ptr pc, bool do_swap)
51
+ vaddr pc, bool do_swap)
52
{
53
uint32_t ret = translator_ldl(env, db, pc);
54
if (do_swap) {
55
@@ -XXX,XX +XXX,XX @@ translator_ldl_swap(CPUArchState *env, DisasContextBase *db,
56
57
static inline uint64_t
58
translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
59
- abi_ptr pc, bool do_swap)
60
+ vaddr pc, bool do_swap)
61
{
62
uint64_t ret = translator_ldq(env, db, pc);
63
if (do_swap) {
64
@@ -XXX,XX +XXX,XX @@ translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
65
* re-synthesised for s390x "ex"). It ensures we update other areas of
66
* the translator with details of the executed instruction.
67
*/
68
-void translator_fake_ldb(uint8_t insn8, abi_ptr pc);
69
+void translator_fake_ldb(uint8_t insn8, vaddr pc);
70
71
/*
72
* Return whether addr is on the same page as where disassembly started.
73
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
74
index XXXXXXX..XXXXXXX 100644
75
--- a/accel/tcg/translator.c
76
+++ b/accel/tcg/translator.c
77
@@ -XXX,XX +XXX,XX @@
78
#include "exec/translator.h"
79
#include "exec/cpu_ldst.h"
80
#include "exec/plugin-gen.h"
81
+#include "exec/cpu_ldst.h"
82
#include "tcg/tcg-op-common.h"
83
#include "internal-target.h"
84
85
@@ -XXX,XX +XXX,XX @@ static void *translator_access(CPUArchState *env, DisasContextBase *db,
86
return host + (pc - base);
87
}
31
}
88
32
89
-static void plugin_insn_append(abi_ptr pc, const void *from, size_t size)
33
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
90
+static void plugin_insn_append(vaddr pc, const void *from, size_t size)
34
default:
91
{
35
g_assert_not_reached();
92
#ifdef CONFIG_PLUGIN
36
}
93
struct qemu_plugin_insn *insn = tcg_ctx->plugin_insn;
37
- s_mask = smask_from_zmask(z_mask);
94
- abi_ptr off;
38
95
+ size_t off;
39
+ s_mask = 0;
96
40
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
97
if (insn == NULL) {
41
case TCG_BSWAP_OZ:
98
return;
42
break;
99
@@ -XXX,XX +XXX,XX @@ static void plugin_insn_append(abi_ptr pc, const void *from, size_t size)
43
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
100
#endif
44
default:
45
/* The high bits are undefined: force all bits above the sign to 1. */
46
z_mask |= sign << 1;
47
- s_mask = 0;
48
break;
49
}
50
ctx->z_mask = z_mask;
51
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
52
g_assert_not_reached();
53
}
54
ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
55
- ctx->s_mask = smask_from_zmask(ctx->z_mask);
56
return false;
101
}
57
}
102
58
103
-uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
59
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
104
+uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc)
60
default:
105
{
61
g_assert_not_reached();
106
uint8_t ret;
62
}
107
void *p = translator_access(env, db, pc, sizeof(ret));
63
- ctx->s_mask = smask_from_zmask(ctx->z_mask);
108
@@ -XXX,XX +XXX,XX @@ uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
64
return false;
109
return ret;
110
}
65
}
111
66
112
-uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
67
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
113
+uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, vaddr pc)
68
return true;
114
{
69
}
115
uint16_t ret, plug;
70
ctx->z_mask = z_mask;
116
void *p = translator_access(env, db, pc, sizeof(ret));
71
- ctx->s_mask = smask_from_zmask(z_mask);
117
@@ -XXX,XX +XXX,XX @@ uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
72
118
return ret;
73
return fold_masks(ctx, op);
119
}
74
}
120
75
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
121
-uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
76
}
122
+uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, vaddr pc)
77
123
{
78
ctx->z_mask = z_mask;
124
uint32_t ret, plug;
79
- ctx->s_mask = smask_from_zmask(z_mask);
125
void *p = translator_access(env, db, pc, sizeof(ret));
80
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
126
@@ -XXX,XX +XXX,XX @@ uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
81
return true;
127
return ret;
82
}
83
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
84
int width = 8 * memop_size(mop);
85
86
if (width < 64) {
87
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
88
- if (!(mop & MO_SIGN)) {
89
+ if (mop & MO_SIGN) {
90
+ ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
91
+ } else {
92
ctx->z_mask = MAKE_64BIT_MASK(0, width);
93
- ctx->s_mask <<= 1;
94
}
95
}
96
97
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
98
fold_setcond_tst_pow2(ctx, op, false);
99
100
ctx->z_mask = 1;
101
- ctx->s_mask = smask_from_zmask(1);
102
return false;
128
}
103
}
129
104
130
-uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
105
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
131
+uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc)
106
}
132
{
107
133
uint64_t ret, plug;
108
ctx->z_mask = 1;
134
void *p = translator_access(env, db, pc, sizeof(ret));
109
- ctx->s_mask = smask_from_zmask(1);
135
@@ -XXX,XX +XXX,XX @@ uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
110
return false;
136
return ret;
111
137
}
112
do_setcond_const:
138
113
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
139
-void translator_fake_ldb(uint8_t insn8, abi_ptr pc)
114
break;
140
+void translator_fake_ldb(uint8_t insn8, vaddr pc)
115
CASE_OP_32_64(ld8u):
141
{
116
ctx->z_mask = MAKE_64BIT_MASK(0, 8);
142
plugin_insn_append(pc, &insn8, sizeof(insn8));
117
- ctx->s_mask = MAKE_64BIT_MASK(9, 55);
143
}
118
break;
144
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
119
CASE_OP_32_64(ld16s):
145
index XXXXXXX..XXXXXXX 100644
120
ctx->s_mask = MAKE_64BIT_MASK(16, 48);
146
--- a/target/hexagon/translate.c
121
break;
147
+++ b/target/hexagon/translate.c
122
CASE_OP_32_64(ld16u):
148
@@ -XXX,XX +XXX,XX @@
123
ctx->z_mask = MAKE_64BIT_MASK(0, 16);
149
#include "exec/translation-block.h"
124
- ctx->s_mask = MAKE_64BIT_MASK(17, 47);
150
#include "exec/cpu_ldst.h"
125
break;
151
#include "exec/log.h"
126
case INDEX_op_ld32s_i64:
152
+#include "exec/cpu_ldst.h"
127
ctx->s_mask = MAKE_64BIT_MASK(32, 32);
153
#include "internal.h"
128
break;
154
#include "attribs.h"
129
case INDEX_op_ld32u_i64:
155
#include "insn.h"
130
ctx->z_mask = MAKE_64BIT_MASK(0, 32);
156
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
131
- ctx->s_mask = MAKE_64BIT_MASK(33, 31);
157
index XXXXXXX..XXXXXXX 100644
132
break;
158
--- a/target/microblaze/translate.c
133
default:
159
+++ b/target/microblaze/translate.c
134
g_assert_not_reached();
160
@@ -XXX,XX +XXX,XX @@
161
#include "tcg/tcg-op.h"
162
#include "exec/helper-proto.h"
163
#include "exec/helper-gen.h"
164
+#include "exec/cpu_ldst.h"
165
#include "exec/translator.h"
166
#include "qemu/qemu-print.h"
167
168
--
135
--
169
2.34.1
136
2.43.0
170
171
diff view generated by jsdifflib
1
The routines in disas-common.c are also used from disas-mon.c.
1
Change the representation from sign bit repetitions to all bits equal
2
Otherwise the rest of disassembly is only used from tcg.
2
to the sign bit, including the sign bit itself.
3
While we're at it, put host and target code into separate files.
4
3
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
The previous format has a problem in that it is difficult to recreate
5
a valid sign mask after a shift operation: the "repetitions" part of
6
the previous format meant that applying the same shift as for the value
7
led to an off-by-one value.
8
9
The new format, including the sign bit itself, means that the sign mask
10
can be manipulated in exactly the same way as the value, and canonicalization
11
is easier.
12
13
Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
14
to do so. Treat 0 as a non-canonical but typeless input for no sign
15
information, which will be reset as appropriate for the data type.
16
We can easily fold in the data from z_mask while canonicalizing.
17
18
Temporarily disable optimizations using s_mask while each operation is
19
converted to use fold_masks_zs and to the new form.
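
For intuition, here is a standalone sketch (not QEMU code) of the two
encodings for a fully known constant: the old form records only the redundant
copies of the sign bit, while the new form also includes the sign bit itself.

/* Standalone illustration, not QEMU code: old vs new s_mask encoding. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* clrsb: number of bits below the msb that are equal to it. */
static int clrsb64(uint64_t x)
{
    uint64_t sign = x >> 63;
    int n = 0;
    for (int i = 62; i >= 0 && ((x >> i) & 1) == sign; i--) {
        n++;
    }
    return n;
}

/* Old encoding: a left-aligned mask of the redundant sign-bit copies. */
static uint64_t smask_old(uint64_t v)
{
    return ~(~0ull >> clrsb64(v));
}

/* New encoding: the same run of bits, but including the sign bit itself. */
static uint64_t smask_new(uint64_t v)
{
    int rep = clrsb64(v);
    return rep >= 63 ? ~0ull : ~(~0ull >> (rep + 1));
}

int main(void)
{
    uint64_t v = 0xffffffff00000000ull;  /* top 32 bits are sign-bit copies */

    assert(smask_old(v) == 0xfffffffe00000000ull); /* 31 redundant sign bits */
    assert(smask_new(v) == 0xffffffff00000000ull); /* 32 bits equal to the msb */

    printf("old %#llx / new %#llx\n",
           (unsigned long long)smask_old(v), (unsigned long long)smask_new(v));
    return 0;
}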
20
21
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
22
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
23
---
8
disas/disas-internal.h | 4 +
24
tcg/optimize.c | 64 ++++++++++++--------------------------------------
9
include/disas/disas.h | 4 +
25
1 file changed, 15 insertions(+), 49 deletions(-)
10
disas/disas-common.c | 118 ++++++++++++++
11
disas/disas-host.c | 129 ++++++++++++++++
12
disas/disas-target.c | 84 ++++++++++
13
disas/disas.c | 338 -----------------------------------------
14
disas/objdump.c | 37 +++++
15
disas/meson.build | 8 +-
16
8 files changed, 382 insertions(+), 340 deletions(-)
17
create mode 100644 disas/disas-common.c
18
create mode 100644 disas/disas-host.c
19
create mode 100644 disas/disas-target.c
20
delete mode 100644 disas/disas.c
21
create mode 100644 disas/objdump.c
22
26
23
diff --git a/disas/disas-internal.h b/disas/disas-internal.h
27
diff --git a/tcg/optimize.c b/tcg/optimize.c
24
index XXXXXXX..XXXXXXX 100644
28
index XXXXXXX..XXXXXXX 100644
25
--- a/disas/disas-internal.h
29
--- a/tcg/optimize.c
26
+++ b/disas/disas-internal.h
30
+++ b/tcg/optimize.c
27
@@ -XXX,XX +XXX,XX @@ typedef struct CPUDebug {
31
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
28
CPUState *cpu;
32
QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
29
} CPUDebug;
33
uint64_t val;
30
34
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
31
+void disas_initialize_debug(CPUDebug *s);
35
- uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
32
void disas_initialize_debug_target(CPUDebug *s, CPUState *cpu);
36
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
33
int disas_gstring_printf(FILE *stream, const char *fmt, ...)
37
} TempOptInfo;
34
G_GNUC_PRINTF(2, 3);
38
35
39
typedef struct OptContext {
36
+int print_insn_od_host(bfd_vma pc, disassemble_info *info);
40
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
37
+int print_insn_od_target(bfd_vma pc, disassemble_info *info);
41
38
+
42
/* In flight values from optimization. */
39
#endif
43
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
40
diff --git a/include/disas/disas.h b/include/disas/disas.h
44
- uint64_t s_mask; /* mask of clrsb(value) bits */
41
index XXXXXXX..XXXXXXX 100644
45
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
42
--- a/include/disas/disas.h
46
TCGType type;
43
+++ b/include/disas/disas.h
47
} OptContext;
44
@@ -XXX,XX +XXX,XX @@
48
45
#define QEMU_DISAS_H
49
-/* Calculate the smask for a specific value. */
46
50
-static uint64_t smask_from_value(uint64_t value)
47
/* Disassemble this for me please... (debugging). */
48
+#ifdef CONFIG_TCG
49
void disas(FILE *out, const void *code, size_t size);
50
void target_disas(FILE *out, CPUState *cpu, uint64_t code, size_t size);
51
+#endif
52
53
void monitor_disas(Monitor *mon, CPUState *cpu, uint64_t pc,
54
int nb_insn, bool is_physical);
55
56
+#ifdef CONFIG_PLUGIN
57
char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size);
58
+#endif
59
60
/* Look up symbol for debugging purpose. Returns "" if unknown. */
61
const char *lookup_symbol(uint64_t orig_addr);
62
diff --git a/disas/disas-common.c b/disas/disas-common.c
63
new file mode 100644
64
index XXXXXXX..XXXXXXX
65
--- /dev/null
66
+++ b/disas/disas-common.c
67
@@ -XXX,XX +XXX,XX @@
68
+/*
69
+ * Common routines for disassembly.
70
+ * SPDX-License-Identifier: GPL-2.0-or-later
71
+ */
72
+
73
+#include "qemu/osdep.h"
74
+#include "disas/disas.h"
75
+#include "disas/capstone.h"
76
+#include "hw/core/cpu.h"
77
+#include "exec/tswap.h"
78
+#include "exec/memory.h"
79
+#include "disas-internal.h"
80
+
81
+
82
+/* Filled in by elfload.c. Simplistic, but will do for now. */
83
+struct syminfo *syminfos = NULL;
84
+
85
+/*
86
+ * Get LENGTH bytes from info's buffer, at target address memaddr.
87
+ * Transfer them to myaddr.
88
+ */
89
+static int target_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length,
90
+ struct disassemble_info *info)
91
+{
92
+ CPUDebug *s = container_of(info, CPUDebug, info);
93
+ int r = cpu_memory_rw_debug(s->cpu, memaddr, myaddr, length, 0);
94
+ return r ? EIO : 0;
95
+}
96
+
97
+/*
98
+ * Print an error message. We can assume that this is in response to
99
+ * an error return from {host,target}_read_memory.
100
+ */
101
+static void perror_memory(int status, bfd_vma memaddr,
102
+ struct disassemble_info *info)
103
+{
104
+ if (status != EIO) {
105
+ /* Can't happen. */
106
+ info->fprintf_func(info->stream, "Unknown error %d\n", status);
107
+ } else {
108
+ /* Address between memaddr and memaddr + len was out of bounds. */
109
+ info->fprintf_func(info->stream,
110
+ "Address 0x%" PRIx64 " is out of bounds.\n",
111
+ memaddr);
112
+ }
113
+}
114
+
115
+/* Print address in hex. */
116
+static void print_address(bfd_vma addr, struct disassemble_info *info)
117
+{
118
+ info->fprintf_func(info->stream, "0x%" PRIx64, addr);
119
+}
120
+
121
+/* Stub prevents some fruitless earching in optabs disassemblers. */
122
+static int symbol_at_address(bfd_vma addr, struct disassemble_info *info)
123
+{
124
+ return 1;
125
+}
126
+
127
+void disas_initialize_debug(CPUDebug *s)
128
+{
129
+ memset(s, 0, sizeof(*s));
130
+ s->info.arch = bfd_arch_unknown;
131
+ s->info.cap_arch = -1;
132
+ s->info.cap_insn_unit = 4;
133
+ s->info.cap_insn_split = 4;
134
+ s->info.memory_error_func = perror_memory;
135
+ s->info.symbol_at_address_func = symbol_at_address;
136
+}
137
+
138
+void disas_initialize_debug_target(CPUDebug *s, CPUState *cpu)
139
+{
140
+ disas_initialize_debug(s);
141
+
142
+ s->cpu = cpu;
143
+ s->info.read_memory_func = target_read_memory;
144
+ s->info.print_address_func = print_address;
145
+ if (target_words_bigendian()) {
146
+ s->info.endian = BFD_ENDIAN_BIG;
147
+ } else {
148
+ s->info.endian = BFD_ENDIAN_LITTLE;
149
+ }
150
+
151
+ CPUClass *cc = CPU_GET_CLASS(cpu);
152
+ if (cc->disas_set_info) {
153
+ cc->disas_set_info(cpu, &s->info);
154
+ }
155
+}
156
+
157
+int disas_gstring_printf(FILE *stream, const char *fmt, ...)
158
+{
159
+ /* We abuse the FILE parameter to pass a GString. */
160
+ GString *s = (GString *)stream;
161
+ int initial_len = s->len;
162
+ va_list va;
163
+
164
+ va_start(va, fmt);
165
+ g_string_append_vprintf(s, fmt, va);
166
+ va_end(va);
167
+
168
+ return s->len - initial_len;
169
+}
170
+
171
+/* Look up symbol for debugging purpose. Returns "" if unknown. */
172
+const char *lookup_symbol(uint64_t orig_addr)
173
+{
174
+ const char *symbol = "";
175
+ struct syminfo *s;
176
+
177
+ for (s = syminfos; s; s = s->next) {
178
+ symbol = s->lookup_symbol(s, orig_addr);
179
+ if (symbol[0] != '\0') {
180
+ break;
181
+ }
182
+ }
183
+
184
+ return symbol;
185
+}
186
diff --git a/disas/disas-host.c b/disas/disas-host.c
187
new file mode 100644
188
index XXXXXXX..XXXXXXX
189
--- /dev/null
190
+++ b/disas/disas-host.c
191
@@ -XXX,XX +XXX,XX @@
192
+/*
193
+ * Routines for host instruction disassembly.
194
+ * SPDX-License-Identifier: GPL-2.0-or-later
195
+ */
196
+
197
+#include "qemu/osdep.h"
198
+#include "disas/disas.h"
199
+#include "disas/capstone.h"
200
+#include "disas-internal.h"
201
+
202
+
203
+/*
204
+ * Get LENGTH bytes from info's buffer, at host address memaddr.
205
+ * Transfer them to myaddr.
206
+ */
207
+static int host_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length,
208
+ struct disassemble_info *info)
209
+{
210
+ if (memaddr < info->buffer_vma
211
+ || memaddr + length > info->buffer_vma + info->buffer_length) {
212
+ /* Out of bounds. Use EIO because GDB uses it. */
213
+ return EIO;
214
+ }
215
+ memcpy (myaddr, info->buffer + (memaddr - info->buffer_vma), length);
216
+ return 0;
217
+}
218
+
219
+/* Print address in hex, truncated to the width of a host virtual address. */
220
+static void host_print_address(bfd_vma addr, struct disassemble_info *info)
221
+{
222
+ info->fprintf_func(info->stream, "0x%" PRIxPTR, (uintptr_t)addr);
223
+}
224
+
225
+static void initialize_debug_host(CPUDebug *s)
226
+{
227
+ disas_initialize_debug(s);
228
+
229
+ s->info.read_memory_func = host_read_memory;
230
+ s->info.print_address_func = host_print_address;
231
+#if HOST_BIG_ENDIAN
232
+ s->info.endian = BFD_ENDIAN_BIG;
233
+#else
234
+ s->info.endian = BFD_ENDIAN_LITTLE;
235
+#endif
236
+#if defined(CONFIG_TCG_INTERPRETER)
237
+ s->info.print_insn = print_insn_tci;
238
+#elif defined(__i386__)
239
+ s->info.mach = bfd_mach_i386_i386;
240
+ s->info.cap_arch = CS_ARCH_X86;
241
+ s->info.cap_mode = CS_MODE_32;
242
+ s->info.cap_insn_unit = 1;
243
+ s->info.cap_insn_split = 8;
244
+#elif defined(__x86_64__)
245
+ s->info.mach = bfd_mach_x86_64;
246
+ s->info.cap_arch = CS_ARCH_X86;
247
+ s->info.cap_mode = CS_MODE_64;
248
+ s->info.cap_insn_unit = 1;
249
+ s->info.cap_insn_split = 8;
250
+#elif defined(_ARCH_PPC)
251
+ s->info.cap_arch = CS_ARCH_PPC;
252
+# ifdef _ARCH_PPC64
253
+ s->info.cap_mode = CS_MODE_64;
254
+# endif
255
+#elif defined(__riscv)
256
+#if defined(_ILP32) || (__riscv_xlen == 32)
257
+ s->info.print_insn = print_insn_riscv32;
258
+#elif defined(_LP64)
259
+ s->info.print_insn = print_insn_riscv64;
260
+#else
261
+#error unsupported RISC-V ABI
262
+#endif
263
+#elif defined(__aarch64__)
264
+ s->info.cap_arch = CS_ARCH_ARM64;
265
+#elif defined(__alpha__)
266
+ s->info.print_insn = print_insn_alpha;
267
+#elif defined(__sparc__)
268
+ s->info.print_insn = print_insn_sparc;
269
+ s->info.mach = bfd_mach_sparc_v9b;
270
+#elif defined(__arm__)
271
+ /* TCG only generates code for arm mode. */
272
+ s->info.cap_arch = CS_ARCH_ARM;
273
+#elif defined(__MIPSEB__)
274
+ s->info.print_insn = print_insn_big_mips;
275
+#elif defined(__MIPSEL__)
276
+ s->info.print_insn = print_insn_little_mips;
277
+#elif defined(__m68k__)
278
+ s->info.print_insn = print_insn_m68k;
279
+#elif defined(__s390__)
280
+ s->info.cap_arch = CS_ARCH_SYSZ;
281
+ s->info.cap_insn_unit = 2;
282
+ s->info.cap_insn_split = 6;
283
+#elif defined(__hppa__)
284
+ s->info.print_insn = print_insn_hppa;
285
+#elif defined(__loongarch__)
286
+ s->info.print_insn = print_insn_loongarch;
287
+#endif
288
+}
289
+
290
+/* Disassemble this for me please... (debugging). */
291
+void disas(FILE *out, const void *code, size_t size)
292
+{
293
+ uintptr_t pc;
294
+ int count;
295
+ CPUDebug s;
296
+
297
+ initialize_debug_host(&s);
298
+ s.info.fprintf_func = fprintf;
299
+ s.info.stream = out;
300
+ s.info.buffer = code;
301
+ s.info.buffer_vma = (uintptr_t)code;
302
+ s.info.buffer_length = size;
303
+ s.info.show_opcodes = true;
304
+
305
+ if (s.info.cap_arch >= 0 && cap_disas_host(&s.info, code, size)) {
306
+ return;
307
+ }
308
+
309
+ if (s.info.print_insn == NULL) {
310
+ s.info.print_insn = print_insn_od_host;
311
+ }
312
+ for (pc = (uintptr_t)code; size > 0; pc += count, size -= count) {
313
+ fprintf(out, "0x%08" PRIxPTR ": ", pc);
314
+ count = s.info.print_insn(pc, &s.info);
315
+ fprintf(out, "\n");
316
+ if (count < 0) {
317
+ break;
318
+ }
319
+ }
320
+}
321
diff --git a/disas/disas-target.c b/disas/disas-target.c
322
new file mode 100644
323
index XXXXXXX..XXXXXXX
324
--- /dev/null
325
+++ b/disas/disas-target.c
326
@@ -XXX,XX +XXX,XX @@
327
+/*
328
+ * Routines for target instruction disassembly.
329
+ * SPDX-License-Identifier: GPL-2.0-or-later
330
+ */
331
+
332
+#include "qemu/osdep.h"
333
+#include "disas/disas.h"
334
+#include "disas/capstone.h"
335
+#include "disas-internal.h"
336
+
337
+
338
+void target_disas(FILE *out, CPUState *cpu, uint64_t code, size_t size)
339
+{
340
+ uint64_t pc;
341
+ int count;
342
+ CPUDebug s;
343
+
344
+ disas_initialize_debug_target(&s, cpu);
345
+ s.info.fprintf_func = fprintf;
346
+ s.info.stream = out;
347
+ s.info.buffer_vma = code;
348
+ s.info.buffer_length = size;
349
+ s.info.show_opcodes = true;
350
+
351
+ if (s.info.cap_arch >= 0 && cap_disas_target(&s.info, code, size)) {
352
+ return;
353
+ }
354
+
355
+ if (s.info.print_insn == NULL) {
356
+ s.info.print_insn = print_insn_od_target;
357
+ }
358
+
359
+ for (pc = code; size > 0; pc += count, size -= count) {
360
+ fprintf(out, "0x%08" PRIx64 ": ", pc);
361
+ count = s.info.print_insn(pc, &s.info);
362
+ fprintf(out, "\n");
363
+ if (count < 0) {
364
+ break;
365
+ }
366
+ if (size < count) {
367
+ fprintf(out,
368
+ "Disassembler disagrees with translator over instruction "
369
+ "decoding\n"
370
+ "Please report this to qemu-devel@nongnu.org\n");
371
+ break;
372
+ }
373
+ }
374
+}
375
+
376
+#ifdef CONFIG_PLUGIN
377
+static void plugin_print_address(bfd_vma addr, struct disassemble_info *info)
378
+{
379
+ /* does nothing */
380
+}
381
+
382
+/*
383
+ * We should only be dissembling one instruction at a time here. If
384
+ * there is left over it usually indicates the front end has read more
385
+ * bytes than it needed.
386
+ */
387
+char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size)
388
+{
389
+ CPUDebug s;
390
+ GString *ds = g_string_new(NULL);
391
+
392
+ disas_initialize_debug_target(&s, cpu);
393
+ s.info.fprintf_func = disas_gstring_printf;
394
+ s.info.stream = (FILE *)ds; /* abuse this slot */
395
+ s.info.buffer_vma = addr;
396
+ s.info.buffer_length = size;
397
+ s.info.print_address_func = plugin_print_address;
398
+
399
+ if (s.info.cap_arch >= 0 && cap_disas_plugin(&s.info, addr, size)) {
400
+ ; /* done */
401
+ } else if (s.info.print_insn) {
402
+ s.info.print_insn(addr, &s.info);
403
+ } else {
404
+ ; /* cannot disassemble -- return empty string */
405
+ }
406
+
407
+ /* Return the buffer, freeing the GString container. */
408
+ return g_string_free(ds, false);
409
+}
410
+#endif /* CONFIG_PLUGIN */
411
diff --git a/disas/disas.c b/disas/disas.c
412
deleted file mode 100644
413
index XXXXXXX..XXXXXXX
414
--- a/disas/disas.c
415
+++ /dev/null
416
@@ -XXX,XX +XXX,XX @@
417
-/* General "disassemble this chunk" code. Used for debugging. */
418
-#include "qemu/osdep.h"
419
-#include "disas/disas-internal.h"
420
-#include "elf.h"
421
-#include "qemu/qemu-print.h"
422
-#include "disas/disas.h"
423
-#include "disas/capstone.h"
424
-#include "hw/core/cpu.h"
425
-#include "exec/tswap.h"
426
-#include "exec/memory.h"
427
-
428
-/* Filled in by elfload.c. Simplistic, but will do for now. */
429
-struct syminfo *syminfos = NULL;
430
-
431
-/*
432
- * Get LENGTH bytes from info's buffer, at host address memaddr.
433
- * Transfer them to myaddr.
434
- */
435
-static int host_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length,
436
- struct disassemble_info *info)
437
-{
51
-{
438
- if (memaddr < info->buffer_vma
52
- int rep = clrsb64(value);
439
- || memaddr + length > info->buffer_vma + info->buffer_length) {
53
- return ~(~0ull >> rep);
440
- /* Out of bounds. Use EIO because GDB uses it. */
441
- return EIO;
442
- }
443
- memcpy (myaddr, info->buffer + (memaddr - info->buffer_vma), length);
444
- return 0;
445
-}
54
-}
446
-
55
-
447
-/*
56
-/*
448
- * Get LENGTH bytes from info's buffer, at target address memaddr.
57
- * Calculate the smask for a given set of known-zeros.
449
- * Transfer them to myaddr.
58
- * If there are lots of zeros on the left, we can consider the remainder
59
- * an unsigned field, and thus the corresponding signed field is one bit
60
- * larger.
450
- */
61
- */
451
-static int target_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length,
62
-static uint64_t smask_from_zmask(uint64_t zmask)
452
- struct disassemble_info *info)
453
-{
63
-{
454
- CPUDebug *s = container_of(info, CPUDebug, info);
64
- /*
455
- int r = cpu_memory_rw_debug(s->cpu, memaddr, myaddr, length, 0);
65
- * Only the 0 bits are significant for zmask, thus the msb itself
456
- return r ? EIO : 0;
66
- * must be zero, else we have no sign information.
67
- */
68
- int rep = clz64(zmask);
69
- if (rep == 0) {
70
- return 0;
71
- }
72
- rep -= 1;
73
- return ~(~0ull >> rep);
457
-}
74
-}
458
-
75
-
459
-/*
76
-/*
460
- * Print an error message. We can assume that this is in response to
77
- * Recreate a properly left-aligned smask after manipulation.
461
- * an error return from {host,target}_read_memory.
78
- * Some bit-shuffling, particularly shifts and rotates, may
79
- * retain sign bits on the left, but may scatter disconnected
80
- * sign bits on the right. Retain only what remains to the left.
462
- */
81
- */
463
-static void perror_memory(int status, bfd_vma memaddr,
82
-static uint64_t smask_from_smask(int64_t smask)
464
- struct disassemble_info *info)
465
-{
83
-{
466
- if (status != EIO) {
84
- /* Only the 1 bits are significant for smask */
467
- /* Can't happen. */
85
- return smask_from_zmask(~smask);
468
- info->fprintf_func(info->stream, "Unknown error %d\n", status);
469
- } else {
470
- /* Address between memaddr and memaddr + len was out of bounds. */
471
- info->fprintf_func(info->stream,
472
- "Address 0x%" PRIx64 " is out of bounds.\n",
473
- memaddr);
474
- }
475
-}
86
-}
476
-
87
-
477
-/* Print address in hex. */
88
static inline TempOptInfo *ts_info(TCGTemp *ts)
478
-static void print_address(bfd_vma addr, struct disassemble_info *info)
89
{
479
-{
90
return ts->state_ptr;
480
- info->fprintf_func(info->stream, "0x%" PRIx64, addr);
91
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
481
-}
92
ti->is_const = true;
482
-
93
ti->val = ts->val;
483
-/* Print address in hex, truncated to the width of a host virtual address. */
94
ti->z_mask = ts->val;
484
-static void host_print_address(bfd_vma addr, struct disassemble_info *info)
95
- ti->s_mask = smask_from_value(ts->val);
485
-{
96
+ ti->s_mask = INT64_MIN >> clrsb64(ts->val);
486
- print_address((uintptr_t)addr, info);
97
} else {
487
-}
98
ti->is_const = false;
488
-
99
ti->z_mask = -1;
489
-/* Stub prevents some fruitless earching in optabs disassemblers. */
100
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
490
-static int symbol_at_address(bfd_vma addr, struct disassemble_info *info)
101
*/
491
-{
102
if (i == 0) {
492
- return 1;
103
ts_info(ts)->z_mask = ctx->z_mask;
493
-}
104
- ts_info(ts)->s_mask = ctx->s_mask;
494
-
105
}
495
-static int print_insn_objdump(bfd_vma pc, disassemble_info *info,
106
}
496
- const char *prefix)
107
}
497
-{
108
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
498
- int i, n = info->buffer_length;
109
* The passed s_mask may be augmented by z_mask.
499
- g_autofree uint8_t *buf = g_malloc(n);
110
*/
500
-
111
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
501
- if (info->read_memory_func(pc, buf, n, info) == 0) {
112
- uint64_t z_mask, uint64_t s_mask)
502
- for (i = 0; i < n; ++i) {
113
+ uint64_t z_mask, int64_t s_mask)
503
- if (i % 32 == 0) {
114
{
504
- info->fprintf_func(info->stream, "\n%s: ", prefix);
115
const TCGOpDef *def = &tcg_op_defs[op->opc];
505
- }
116
TCGTemp *ts;
506
- info->fprintf_func(info->stream, "%02x", buf[i]);
117
TempOptInfo *ti;
507
- }
118
+ int rep;
508
- } else {
119
509
- info->fprintf_func(info->stream, "unable to read memory");
120
/* Only single-output opcodes are supported here. */
510
- }
121
tcg_debug_assert(def->nb_oargs == 1);
511
- return n;
122
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
512
-}
123
*/
513
-
124
if (ctx->type == TCG_TYPE_I32) {
514
-static int print_insn_od_host(bfd_vma pc, disassemble_info *info)
125
z_mask = (int32_t)z_mask;
515
-{
126
- s_mask |= MAKE_64BIT_MASK(32, 32);
516
- return print_insn_objdump(pc, info, "OBJD-H");
127
+ s_mask |= INT32_MIN;
517
-}
128
}
518
-
129
519
-static int print_insn_od_target(bfd_vma pc, disassemble_info *info)
130
if (z_mask == 0) {
520
-{
131
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
521
- return print_insn_objdump(pc, info, "OBJD-T");
132
522
-}
133
ti = ts_info(ts);
523
-
134
ti->z_mask = z_mask;
524
-static void initialize_debug(CPUDebug *s)
135
- ti->s_mask = s_mask | smask_from_zmask(z_mask);
525
-{
526
- memset(s, 0, sizeof(*s));
527
- s->info.arch = bfd_arch_unknown;
528
- s->info.cap_arch = -1;
529
- s->info.cap_insn_unit = 4;
530
- s->info.cap_insn_split = 4;
531
- s->info.memory_error_func = perror_memory;
532
- s->info.symbol_at_address_func = symbol_at_address;
533
-}
534
-
535
-void disas_initialize_debug_target(CPUDebug *s, CPUState *cpu)
536
-{
537
- initialize_debug(s);
538
-
539
- s->cpu = cpu;
540
- s->info.read_memory_func = target_read_memory;
541
- s->info.print_address_func = print_address;
542
- if (target_words_bigendian()) {
543
- s->info.endian = BFD_ENDIAN_BIG;
544
- } else {
545
- s->info.endian = BFD_ENDIAN_LITTLE;
546
- }
547
-
548
- CPUClass *cc = CPU_GET_CLASS(cpu);
549
- if (cc->disas_set_info) {
550
- cc->disas_set_info(cpu, &s->info);
551
- }
552
-}
553
-
554
-static void initialize_debug_host(CPUDebug *s)
555
-{
556
- initialize_debug(s);
557
-
558
- s->info.read_memory_func = host_read_memory;
559
- s->info.print_address_func = host_print_address;
560
-#if HOST_BIG_ENDIAN
561
- s->info.endian = BFD_ENDIAN_BIG;
562
-#else
563
- s->info.endian = BFD_ENDIAN_LITTLE;
564
-#endif
565
-#if defined(CONFIG_TCG_INTERPRETER)
566
- s->info.print_insn = print_insn_tci;
567
-#elif defined(__i386__)
568
- s->info.mach = bfd_mach_i386_i386;
569
- s->info.cap_arch = CS_ARCH_X86;
570
- s->info.cap_mode = CS_MODE_32;
571
- s->info.cap_insn_unit = 1;
572
- s->info.cap_insn_split = 8;
573
-#elif defined(__x86_64__)
574
- s->info.mach = bfd_mach_x86_64;
575
- s->info.cap_arch = CS_ARCH_X86;
576
- s->info.cap_mode = CS_MODE_64;
577
- s->info.cap_insn_unit = 1;
578
- s->info.cap_insn_split = 8;
579
-#elif defined(_ARCH_PPC)
580
- s->info.cap_arch = CS_ARCH_PPC;
581
-# ifdef _ARCH_PPC64
582
- s->info.cap_mode = CS_MODE_64;
583
-# endif
584
-#elif defined(__riscv)
585
-#if defined(_ILP32) || (__riscv_xlen == 32)
586
- s->info.print_insn = print_insn_riscv32;
587
-#elif defined(_LP64)
588
- s->info.print_insn = print_insn_riscv64;
589
-#else
590
-#error unsupported RISC-V ABI
591
-#endif
592
-#elif defined(__aarch64__)
593
- s->info.cap_arch = CS_ARCH_ARM64;
594
-#elif defined(__alpha__)
595
- s->info.print_insn = print_insn_alpha;
596
-#elif defined(__sparc__)
597
- s->info.print_insn = print_insn_sparc;
598
- s->info.mach = bfd_mach_sparc_v9b;
599
-#elif defined(__arm__)
600
- /* TCG only generates code for arm mode. */
601
- s->info.cap_arch = CS_ARCH_ARM;
602
-#elif defined(__MIPSEB__)
603
- s->info.print_insn = print_insn_big_mips;
604
-#elif defined(__MIPSEL__)
605
- s->info.print_insn = print_insn_little_mips;
606
-#elif defined(__m68k__)
607
- s->info.print_insn = print_insn_m68k;
608
-#elif defined(__s390__)
609
- s->info.cap_arch = CS_ARCH_SYSZ;
610
- s->info.cap_insn_unit = 2;
611
- s->info.cap_insn_split = 6;
612
-#elif defined(__hppa__)
613
- s->info.print_insn = print_insn_hppa;
614
-#elif defined(__loongarch__)
615
- s->info.print_insn = print_insn_loongarch;
616
-#endif
617
-}
618
-
619
-/* Disassemble this for me please... (debugging). */
620
-void target_disas(FILE *out, CPUState *cpu, uint64_t code, size_t size)
621
-{
622
- uint64_t pc;
623
- int count;
624
- CPUDebug s;
625
-
626
- disas_initialize_debug_target(&s, cpu);
627
- s.info.fprintf_func = fprintf;
628
- s.info.stream = out;
629
- s.info.buffer_vma = code;
630
- s.info.buffer_length = size;
631
- s.info.show_opcodes = true;
632
-
633
- if (s.info.cap_arch >= 0 && cap_disas_target(&s.info, code, size)) {
634
- return;
635
- }
636
-
637
- if (s.info.print_insn == NULL) {
638
- s.info.print_insn = print_insn_od_target;
639
- }
640
-
641
- for (pc = code; size > 0; pc += count, size -= count) {
642
- fprintf(out, "0x%08" PRIx64 ": ", pc);
643
- count = s.info.print_insn(pc, &s.info);
644
- fprintf(out, "\n");
645
- if (count < 0) {
646
- break;
647
- }
648
- if (size < count) {
649
- fprintf(out,
650
- "Disassembler disagrees with translator over instruction "
651
- "decoding\n"
652
- "Please report this to qemu-devel@nongnu.org\n");
653
- break;
654
- }
655
- }
656
-}
657
-
658
-int disas_gstring_printf(FILE *stream, const char *fmt, ...)
659
-{
660
- /* We abuse the FILE parameter to pass a GString. */
661
- GString *s = (GString *)stream;
662
- int initial_len = s->len;
663
- va_list va;
664
-
665
- va_start(va, fmt);
666
- g_string_append_vprintf(s, fmt, va);
667
- va_end(va);
668
-
669
- return s->len - initial_len;
670
-}
671
-
672
-static void plugin_print_address(bfd_vma addr, struct disassemble_info *info)
673
-{
674
- /* does nothing */
675
-}
676
-
677
-
678
-/*
679
- * We should only be dissembling one instruction at a time here. If
680
- * there is left over it usually indicates the front end has read more
681
- * bytes than it needed.
682
- */
683
-char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size)
684
-{
685
- CPUDebug s;
686
- GString *ds = g_string_new(NULL);
687
-
688
- disas_initialize_debug_target(&s, cpu);
689
- s.info.fprintf_func = disas_gstring_printf;
690
- s.info.stream = (FILE *)ds; /* abuse this slot */
691
- s.info.buffer_vma = addr;
692
- s.info.buffer_length = size;
693
- s.info.print_address_func = plugin_print_address;
694
-
695
- if (s.info.cap_arch >= 0 && cap_disas_plugin(&s.info, addr, size)) {
696
- ; /* done */
697
- } else if (s.info.print_insn) {
698
- s.info.print_insn(addr, &s.info);
699
- } else {
700
- ; /* cannot disassemble -- return empty string */
701
- }
702
-
703
- /* Return the buffer, freeing the GString container. */
704
- return g_string_free(ds, false);
705
-}
706
-
707
-/* Disassemble this for me please... (debugging). */
708
-void disas(FILE *out, const void *code, size_t size)
709
-{
710
- uintptr_t pc;
711
- int count;
712
- CPUDebug s;
713
-
714
- initialize_debug_host(&s);
715
- s.info.fprintf_func = fprintf;
716
- s.info.stream = out;
717
- s.info.buffer = code;
718
- s.info.buffer_vma = (uintptr_t)code;
719
- s.info.buffer_length = size;
720
- s.info.show_opcodes = true;
721
-
722
- if (s.info.cap_arch >= 0 && cap_disas_host(&s.info, code, size)) {
723
- return;
724
- }
725
-
726
- if (s.info.print_insn == NULL) {
727
- s.info.print_insn = print_insn_od_host;
728
- }
729
- for (pc = (uintptr_t)code; size > 0; pc += count, size -= count) {
730
- fprintf(out, "0x%08" PRIxPTR ": ", pc);
731
- count = s.info.print_insn(pc, &s.info);
732
- fprintf(out, "\n");
733
- if (count < 0) {
734
- break;
735
- }
736
- }
737
-
738
-}
739
-
740
-/* Look up symbol for debugging purpose. Returns "" if unknown. */
741
-const char *lookup_symbol(uint64_t orig_addr)
742
-{
743
- const char *symbol = "";
744
- struct syminfo *s;
745
-
746
- for (s = syminfos; s; s = s->next) {
747
- symbol = s->lookup_symbol(s, orig_addr);
748
- if (symbol[0] != '\0') {
749
- break;
750
- }
751
- }
752
-
753
- return symbol;
754
-}
755
diff --git a/disas/objdump.c b/disas/objdump.c
756
new file mode 100644
757
index XXXXXXX..XXXXXXX
758
--- /dev/null
759
+++ b/disas/objdump.c
760
@@ -XXX,XX +XXX,XX @@
761
+/*
762
+ * Dump disassembly as text, for processing by scripts/disas-objdump.pl.
763
+ * SPDX-License-Identifier: GPL-2.0-or-later
764
+ */
765
+
136
+
766
+#include "qemu/osdep.h"
137
+ /* Canonicalize s_mask and incorporate data from z_mask. */
767
+#include "disas-internal.h"
138
+ rep = clz64(~s_mask);
139
+ rep = MAX(rep, clz64(z_mask));
140
+ rep = MAX(rep - 1, 0);
141
+ ti->s_mask = INT64_MIN >> rep;
768
+
142
+
769
+
143
return true;
770
+static int print_insn_objdump(bfd_vma pc, disassemble_info *info,
144
}
771
+ const char *prefix)
145
772
+{
146
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
773
+ int i, n = info->buffer_length;
147
774
+ g_autofree uint8_t *buf = g_malloc(n);
148
ctx->z_mask = z_mask;
775
+
149
ctx->s_mask = s_mask;
776
+ if (info->read_memory_func(pc, buf, n, info) == 0) {
150
- if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
777
+ for (i = 0; i < n; ++i) {
151
+ if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
778
+ if (i % 32 == 0) {
152
return true;
779
+ info->fprintf_func(info->stream, "\n%s: ", prefix);
153
}
780
+ }
154
781
+ info->fprintf_func(info->stream, "%02x", buf[i]);
155
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
782
+ }
156
s_mask |= MAKE_64BIT_MASK(len, 64 - len);
783
+ } else {
157
ctx->s_mask = s_mask;
784
+ info->fprintf_func(info->stream, "unable to read memory");
158
785
+ }
159
- if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
786
+ return n;
160
+ if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
787
+}
161
return true;
788
+
162
}
789
+int print_insn_od_host(bfd_vma pc, disassemble_info *info)
163
790
+{
164
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
791
+ return print_insn_objdump(pc, info, "OBJD-H");
165
ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
792
+}
166
793
+
167
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
794
+int print_insn_od_target(bfd_vma pc, disassemble_info *info)
168
- ctx->s_mask = smask_from_smask(s_mask);
795
+{
169
796
+ return print_insn_objdump(pc, info, "OBJD-T");
170
return fold_masks(ctx, op);
797
+}
171
}
798
diff --git a/disas/meson.build b/disas/meson.build
799
index XXXXXXX..XXXXXXX 100644
800
--- a/disas/meson.build
801
+++ b/disas/meson.build
802
@@ -XXX,XX +XXX,XX @@ common_ss.add(when: 'CONFIG_SH4_DIS', if_true: files('sh4.c'))
803
common_ss.add(when: 'CONFIG_SPARC_DIS', if_true: files('sparc.c'))
804
common_ss.add(when: 'CONFIG_XTENSA_DIS', if_true: files('xtensa.c'))
805
common_ss.add(when: capstone, if_true: [files('capstone.c'), capstone])
806
-common_ss.add(files('disas.c'))
807
-
808
+common_ss.add(when: 'CONFIG_TCG', if_true: files(
809
+ 'disas-host.c',
810
+ 'disas-target.c',
811
+ 'objdump.c'
812
+))
813
+common_ss.add(files('disas-common.c'))
814
system_ss.add(files('disas-mon.c'))
815
specific_ss.add(capstone)
816
--
172
--
817
2.34.1
173
2.43.0
818
819
diff view generated by jsdifflib
1
Reorg translator_access into translator_ld, with a more
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
memcpy-ish interface. If both pages are in ram, do not
3
go through the caller's slow path.
4
5
Assert that the access is within the two pages that we are
6
prepared to protect, per TranslationBlock. Allow access
7
prior to pc_first, so long as it is within the first page.
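
As a side note for reviewers (illustration only, not part of the change): the
first-page chunk length used below, -(pc | TARGET_PAGE_MASK), is simply "bytes
left in pc's page". A minimal standalone sketch, with a made-up 4 KiB page size
standing in for the TARGET_PAGE_* macros:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 0x1000ull                /* stand-in for TARGET_PAGE_SIZE */
    #define PAGE_MASK (~(PAGE_SIZE - 1))       /* stand-in for TARGET_PAGE_MASK */

    int main(void)
    {
        uint64_t pc = 0x1ffe;                  /* 2 bytes before a page boundary */
        uint64_t len0 = -(pc | PAGE_MASK);     /* == PAGE_SIZE - (pc & ~PAGE_MASK) */

        assert(len0 == 2);                     /* copy 2 bytes from page 0, rest from page 1 */
        return 0;
    }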
8
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
3
---
12
accel/tcg/translator.c | 189 ++++++++++++++++++++++-------------------
4
tcg/optimize.c | 9 +++++----
13
1 file changed, 101 insertions(+), 88 deletions(-)
5
1 file changed, 5 insertions(+), 4 deletions(-)
14
6
15
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
16
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/tcg/translator.c
9
--- a/tcg/optimize.c
18
+++ b/accel/tcg/translator.c
10
+++ b/tcg/optimize.c
19
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
11
@@ -XXX,XX +XXX,XX @@ static void finish_ebb(OptContext *ctx)
12
remove_mem_copy_all(ctx);
13
}
14
15
-static void finish_folding(OptContext *ctx, TCGOp *op)
16
+static bool finish_folding(OptContext *ctx, TCGOp *op)
17
{
18
const TCGOpDef *def = &tcg_op_defs[op->opc];
19
int i, nb_oargs;
20
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
21
ts_info(ts)->z_mask = ctx->z_mask;
22
}
20
}
23
}
21
}
22
23
-static void *translator_access(CPUArchState *env, DisasContextBase *db,
24
- vaddr pc, size_t len)
25
+static bool translator_ld(CPUArchState *env, DisasContextBase *db,
26
+ void *dest, vaddr pc, size_t len)
27
{
28
+ TranslationBlock *tb = db->tb;
29
+ vaddr last = pc + len - 1;
30
void *host;
31
- vaddr base, end;
32
- TranslationBlock *tb;
33
-
34
- tb = db->tb;
35
+ vaddr base;
36
37
/* Use slow path if first page is MMIO. */
38
if (unlikely(tb_page_addr0(tb) == -1)) {
39
- return NULL;
40
+ return false;
41
}
42
43
- end = pc + len - 1;
44
- if (likely(is_same_page(db, end))) {
45
- host = db->host_addr[0];
46
- base = db->pc_first;
47
- } else {
48
+ host = db->host_addr[0];
49
+ base = db->pc_first;
50
+
51
+ if (likely(((base ^ last) & TARGET_PAGE_MASK) == 0)) {
52
+ /* Entire read is from the first page. */
53
+ memcpy(dest, host + (pc - base), len);
54
+ return true;
55
+ }
56
+
57
+ if (unlikely(((base ^ pc) & TARGET_PAGE_MASK) == 0)) {
58
+ /* Read begins on the first page and extends to the second. */
59
+ size_t len0 = -(pc | TARGET_PAGE_MASK);
60
+ memcpy(dest, host + (pc - base), len0);
61
+ pc += len0;
62
+ dest += len0;
63
+ len -= len0;
64
+ }
65
+
66
+ /*
67
+ * The read must conclude on the second page and not extend to a third.
68
+ *
69
+ * TODO: We could allow the two pages to be virtually discontiguous,
70
+ * since we already allow the two pages to be physically discontiguous.
71
+ * The only reasonable use case would be executing an insn at the end
72
+ * of the address space wrapping around to the beginning. For that,
73
+ * we would need to know the current width of the address space.
74
+ * In the meantime, assert.
75
+ */
76
+ base = (base & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
77
+ assert(((base ^ pc) & TARGET_PAGE_MASK) == 0);
78
+ assert(((base ^ last) & TARGET_PAGE_MASK) == 0);
79
+ host = db->host_addr[1];
80
+
81
+ if (host == NULL) {
82
+ tb_page_addr_t page0, old_page1, new_page1;
83
+
84
+ new_page1 = get_page_addr_code_hostp(env, base, &db->host_addr[1]);
85
+
86
+ /*
87
+ * If the second page is MMIO, treat as if the first page
88
+ * was MMIO as well, so that we do not cache the TB.
89
+ */
90
+ if (unlikely(new_page1 == -1)) {
91
+ tb_unlock_pages(tb);
92
+ tb_set_page_addr0(tb, -1);
93
+ return false;
94
+ }
95
+
96
+ /*
97
+ * If this is not the first time around, and page1 matches,
98
+ * then we already have the page locked. Alternately, we're
99
+ * not doing anything to prevent the PTE from changing, so
100
+ * we might wind up with a different page, requiring us to
101
+ * re-do the locking.
102
+ */
103
+ old_page1 = tb_page_addr1(tb);
104
+ if (likely(new_page1 != old_page1)) {
105
+ page0 = tb_page_addr0(tb);
106
+ if (unlikely(old_page1 != -1)) {
107
+ tb_unlock_page1(page0, old_page1);
108
+ }
109
+ tb_set_page_addr1(tb, new_page1);
110
+ tb_lock_page1(page0, new_page1);
111
+ }
112
host = db->host_addr[1];
113
- base = TARGET_PAGE_ALIGN(db->pc_first);
114
- if (host == NULL) {
115
- tb_page_addr_t page0, old_page1, new_page1;
116
-
117
- new_page1 = get_page_addr_code_hostp(env, base, &db->host_addr[1]);
118
-
119
- /*
120
- * If the second page is MMIO, treat as if the first page
121
- * was MMIO as well, so that we do not cache the TB.
122
- */
123
- if (unlikely(new_page1 == -1)) {
124
- tb_unlock_pages(tb);
125
- tb_set_page_addr0(tb, -1);
126
- return NULL;
127
- }
128
-
129
- /*
130
- * If this is not the first time around, and page1 matches,
131
- * then we already have the page locked. Alternately, we're
132
- * not doing anything to prevent the PTE from changing, so
133
- * we might wind up with a different page, requiring us to
134
- * re-do the locking.
135
- */
136
- old_page1 = tb_page_addr1(tb);
137
- if (likely(new_page1 != old_page1)) {
138
- page0 = tb_page_addr0(tb);
139
- if (unlikely(old_page1 != -1)) {
140
- tb_unlock_page1(page0, old_page1);
141
- }
142
- tb_set_page_addr1(tb, new_page1);
143
- tb_lock_page1(page0, new_page1);
144
- }
145
- host = db->host_addr[1];
146
- }
147
-
148
- /* Use slow path when crossing pages. */
149
- if (is_same_page(db, pc)) {
150
- return NULL;
151
- }
152
}
153
154
- tcg_debug_assert(pc >= base);
155
- return host + (pc - base);
156
+ memcpy(dest, host + (pc - base), len);
157
+ return true;
24
+ return true;
158
}
25
}
159
26
160
static void plugin_insn_append(vaddr pc, const void *from, size_t size)
27
/*
161
@@ -XXX,XX +XXX,XX @@ static void plugin_insn_append(vaddr pc, const void *from, size_t size)
28
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
162
29
fold_xi_to_x(ctx, op, 0)) {
163
uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc)
30
return true;
164
{
165
- uint8_t ret;
166
- void *p = translator_access(env, db, pc, sizeof(ret));
167
+ uint8_t raw;
168
169
- if (p) {
170
- plugin_insn_append(pc, p, sizeof(ret));
171
- return ldub_p(p);
172
+ if (!translator_ld(env, db, &raw, pc, sizeof(raw))) {
173
+ raw = cpu_ldub_code(env, pc);
174
}
31
}
175
- ret = cpu_ldub_code(env, pc);
32
- return false;
176
- plugin_insn_append(pc, &ret, sizeof(ret));
33
+ return finish_folding(ctx, op);
177
- return ret;
178
+ plugin_insn_append(pc, &raw, sizeof(raw));
179
+ return raw;
180
}
34
}
181
35
182
uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, vaddr pc)
36
/* We cannot as yet do_constant_folding with vectors. */
183
{
37
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
184
- uint16_t ret, plug;
38
fold_xi_to_x(ctx, op, 0)) {
185
- void *p = translator_access(env, db, pc, sizeof(ret));
39
return true;
186
+ uint16_t raw, tgt;
187
188
- if (p) {
189
- plugin_insn_append(pc, p, sizeof(ret));
190
- return lduw_p(p);
191
+ if (translator_ld(env, db, &raw, pc, sizeof(raw))) {
192
+ tgt = tswap16(raw);
193
+ } else {
194
+ tgt = cpu_lduw_code(env, pc);
195
+ raw = tswap16(tgt);
196
}
40
}
197
- ret = cpu_lduw_code(env, pc);
41
- return false;
198
- plug = tswap16(ret);
42
+ return finish_folding(ctx, op);
199
- plugin_insn_append(pc, &plug, sizeof(ret));
200
- return ret;
201
+ plugin_insn_append(pc, &raw, sizeof(raw));
202
+ return tgt;
203
}
43
}
204
44
205
uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, vaddr pc)
45
static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
206
{
46
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
207
- uint32_t ret, plug;
47
op->args[4] = arg_new_constant(ctx, bl);
208
- void *p = translator_access(env, db, pc, sizeof(ret));
48
op->args[5] = arg_new_constant(ctx, bh);
209
+ uint32_t raw, tgt;
210
211
- if (p) {
212
- plugin_insn_append(pc, p, sizeof(ret));
213
- return ldl_p(p);
214
+ if (translator_ld(env, db, &raw, pc, sizeof(raw))) {
215
+ tgt = tswap32(raw);
216
+ } else {
217
+ tgt = cpu_ldl_code(env, pc);
218
+ raw = tswap32(tgt);
219
}
49
}
220
- ret = cpu_ldl_code(env, pc);
50
- return false;
221
- plug = tswap32(ret);
51
+ return finish_folding(ctx, op);
222
- plugin_insn_append(pc, &plug, sizeof(ret));
223
- return ret;
224
+ plugin_insn_append(pc, &raw, sizeof(raw));
225
+ return tgt;
226
}
52
}
227
53
228
uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc)
54
static bool fold_add2(OptContext *ctx, TCGOp *op)
229
{
230
- uint64_t ret, plug;
231
- void *p = translator_access(env, db, pc, sizeof(ret));
232
+ uint64_t raw, tgt;
233
234
- if (p) {
235
- plugin_insn_append(pc, p, sizeof(ret));
236
- return ldq_p(p);
237
+ if (translator_ld(env, db, &raw, pc, sizeof(raw))) {
238
+ tgt = tswap64(raw);
239
+ } else {
240
+ tgt = cpu_ldq_code(env, pc);
241
+ raw = tswap64(tgt);
242
}
243
- ret = cpu_ldq_code(env, pc);
244
- plug = tswap64(ret);
245
- plugin_insn_append(pc, &plug, sizeof(ret));
246
- return ret;
247
+ plugin_insn_append(pc, &raw, sizeof(raw));
248
+ return tgt;
249
}
250
251
void translator_fake_ldb(DisasContextBase *db, vaddr pc, uint8_t insn8)
252
--
55
--
253
2.34.1
56
2.43.0
254
255
diff view generated by jsdifflib
1
Read from already translated pages, or saved mmio data.
1
Introduce ti_is_const, ti_const_val, ti_is_const_val.
2
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
4
---
6
include/disas/disas.h | 5 +++--
5
tcg/optimize.c | 20 +++++++++++++++++---
7
include/exec/translator.h | 4 ++--
6
1 file changed, 17 insertions(+), 3 deletions(-)
8
include/qemu/typedefs.h | 1 +
9
accel/tcg/translator.c | 2 +-
10
disas/disas-common.c | 14 --------------
11
disas/disas-mon.c | 15 +++++++++++++++
12
disas/disas-target.c | 19 +++++++++++++++++--
13
plugins/api.c | 4 ++--
14
8 files changed, 41 insertions(+), 23 deletions(-)
15
7
16
diff --git a/include/disas/disas.h b/include/disas/disas.h
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
18
--- a/include/disas/disas.h
10
--- a/tcg/optimize.c
19
+++ b/include/disas/disas.h
11
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@
12
@@ -XXX,XX +XXX,XX @@ static inline TempOptInfo *arg_info(TCGArg arg)
21
/* Disassemble this for me please... (debugging). */
13
return ts_info(arg_temp(arg));
22
#ifdef CONFIG_TCG
14
}
23
void disas(FILE *out, const void *code, size_t size);
15
24
-void target_disas(FILE *out, CPUState *cpu, uint64_t code, size_t size);
16
+static inline bool ti_is_const(TempOptInfo *ti)
25
+void target_disas(FILE *out, CPUState *cpu, const DisasContextBase *db);
26
#endif
27
28
void monitor_disas(Monitor *mon, CPUState *cpu, uint64_t pc,
29
int nb_insn, bool is_physical);
30
31
#ifdef CONFIG_PLUGIN
32
-char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size);
33
+char *plugin_disas(CPUState *cpu, const DisasContextBase *db,
34
+ uint64_t addr, size_t size);
35
#endif
36
37
/* Look up symbol for debugging purpose. Returns "" if unknown. */
38
diff --git a/include/exec/translator.h b/include/exec/translator.h
39
index XXXXXXX..XXXXXXX 100644
40
--- a/include/exec/translator.h
41
+++ b/include/exec/translator.h
42
@@ -XXX,XX +XXX,XX @@ typedef enum DisasJumpType {
43
*
44
* Architecture-agnostic disassembly context.
45
*/
46
-typedef struct DisasContextBase {
47
+struct DisasContextBase {
48
TranslationBlock *tb;
49
vaddr pc_first;
50
vaddr pc_next;
51
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContextBase {
52
int record_start;
53
int record_len;
54
uint8_t record[32];
55
-} DisasContextBase;
56
+};
57
58
/**
59
* TranslatorOps:
60
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
61
index XXXXXXX..XXXXXXX 100644
62
--- a/include/qemu/typedefs.h
63
+++ b/include/qemu/typedefs.h
64
@@ -XXX,XX +XXX,XX @@ typedef struct CPUPluginState CPUPluginState;
65
typedef struct CPUState CPUState;
66
typedef struct DeviceState DeviceState;
67
typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot;
68
+typedef struct DisasContextBase DisasContextBase;
69
typedef struct DisplayChangeListener DisplayChangeListener;
70
typedef struct DriveInfo DriveInfo;
71
typedef struct DumpState DumpState;
72
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
73
index XXXXXXX..XXXXXXX 100644
74
--- a/accel/tcg/translator.c
75
+++ b/accel/tcg/translator.c
76
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
77
if (!ops->disas_log ||
78
!ops->disas_log(db, cpu, logfile)) {
79
fprintf(logfile, "IN: %s\n", lookup_symbol(db->pc_first));
80
- target_disas(logfile, cpu, db->pc_first, db->tb->size);
81
+ target_disas(logfile, cpu, db);
82
}
83
fprintf(logfile, "\n");
84
qemu_log_unlock(logfile);
85
diff --git a/disas/disas-common.c b/disas/disas-common.c
86
index XXXXXXX..XXXXXXX 100644
87
--- a/disas/disas-common.c
88
+++ b/disas/disas-common.c
89
@@ -XXX,XX +XXX,XX @@
90
#include "disas/capstone.h"
91
#include "hw/core/cpu.h"
92
#include "exec/tswap.h"
93
-#include "exec/memory.h"
94
#include "disas-internal.h"
95
96
97
/* Filled in by elfload.c. Simplistic, but will do for now. */
98
struct syminfo *syminfos = NULL;
99
100
-/*
101
- * Get LENGTH bytes from info's buffer, at target address memaddr.
102
- * Transfer them to myaddr.
103
- */
104
-static int target_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length,
105
- struct disassemble_info *info)
106
-{
107
- CPUDebug *s = container_of(info, CPUDebug, info);
108
- int r = cpu_memory_rw_debug(s->cpu, memaddr, myaddr, length, 0);
109
- return r ? EIO : 0;
110
-}
111
-
112
/*
113
* Print an error message. We can assume that this is in response to
114
* an error return from {host,target}_read_memory.
115
@@ -XXX,XX +XXX,XX @@ void disas_initialize_debug_target(CPUDebug *s, CPUState *cpu)
116
disas_initialize_debug(s);
117
118
s->cpu = cpu;
119
- s->info.read_memory_func = target_read_memory;
120
s->info.print_address_func = print_address;
121
if (target_words_bigendian()) {
122
s->info.endian = BFD_ENDIAN_BIG;
123
diff --git a/disas/disas-mon.c b/disas/disas-mon.c
124
index XXXXXXX..XXXXXXX 100644
125
--- a/disas/disas-mon.c
126
+++ b/disas/disas-mon.c
127
@@ -XXX,XX +XXX,XX @@
128
#include "hw/core/cpu.h"
129
#include "monitor/monitor.h"
130
131
+/*
132
+ * Get LENGTH bytes from info's buffer, at target address memaddr.
133
+ * Transfer them to myaddr.
134
+ */
135
+static int
136
+virtual_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length,
137
+ struct disassemble_info *info)
138
+{
17
+{
139
+ CPUDebug *s = container_of(info, CPUDebug, info);
18
+ return ti->is_const;
140
+ int r = cpu_memory_rw_debug(s->cpu, memaddr, myaddr, length, 0);
141
+ return r ? EIO : 0;
142
+}
19
+}
143
+
20
+
144
static int
21
+static inline uint64_t ti_const_val(TempOptInfo *ti)
145
physical_read_memory(bfd_vma memaddr, bfd_byte *myaddr, int length,
22
+{
146
struct disassemble_info *info)
23
+ return ti->val;
147
@@ -XXX,XX +XXX,XX @@ void monitor_disas(Monitor *mon, CPUState *cpu, uint64_t pc,
148
149
if (is_physical) {
150
s.info.read_memory_func = physical_read_memory;
151
+ } else {
152
+ s.info.read_memory_func = virtual_read_memory;
153
}
154
s.info.buffer_vma = pc;
155
156
diff --git a/disas/disas-target.c b/disas/disas-target.c
157
index XXXXXXX..XXXXXXX 100644
158
--- a/disas/disas-target.c
159
+++ b/disas/disas-target.c
160
@@ -XXX,XX +XXX,XX @@
161
#include "qemu/osdep.h"
162
#include "disas/disas.h"
163
#include "disas/capstone.h"
164
+#include "exec/translator.h"
165
#include "disas-internal.h"
166
167
168
-void target_disas(FILE *out, CPUState *cpu, uint64_t code, size_t size)
169
+static int translator_read_memory(bfd_vma memaddr, bfd_byte *myaddr,
170
+ int length, struct disassemble_info *info)
171
{
172
+ const DisasContextBase *db = info->application_data;
173
+ return translator_st(db, myaddr, memaddr, length) ? 0 : EIO;
174
+}
24
+}
175
+
25
+
176
+void target_disas(FILE *out, CPUState *cpu, const struct DisasContextBase *db)
26
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
177
+{
27
+{
178
+ uint64_t code = db->pc_first;
28
+ return ti_is_const(ti) && ti_const_val(ti) == val;
179
+ size_t size = translator_st_len(db);
29
+}
180
uint64_t pc;
30
+
181
int count;
31
static inline bool ts_is_const(TCGTemp *ts)
182
CPUDebug s;
183
184
disas_initialize_debug_target(&s, cpu);
185
+ s.info.read_memory_func = translator_read_memory;
186
+ s.info.application_data = (void *)db;
187
s.info.fprintf_func = fprintf;
188
s.info.stream = out;
189
s.info.buffer_vma = code;
190
@@ -XXX,XX +XXX,XX @@ static void plugin_print_address(bfd_vma addr, struct disassemble_info *info)
191
* there is left over it usually indicates the front end has read more
192
* bytes than it needed.
193
*/
194
-char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size)
195
+char *plugin_disas(CPUState *cpu, const DisasContextBase *db,
196
+ uint64_t addr, size_t size)
197
{
32
{
198
CPUDebug s;
33
- return ts_info(ts)->is_const;
199
GString *ds = g_string_new(NULL);
34
+ return ti_is_const(ts_info(ts));
200
35
}
201
disas_initialize_debug_target(&s, cpu);
36
202
+ s.info.read_memory_func = translator_read_memory;
37
static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
203
+ s.info.application_data = (void *)db;
204
s.info.fprintf_func = disas_gstring_printf;
205
s.info.stream = (FILE *)ds; /* abuse this slot */
206
s.info.buffer_vma = addr;
207
diff --git a/plugins/api.c b/plugins/api.c
208
index XXXXXXX..XXXXXXX 100644
209
--- a/plugins/api.c
210
+++ b/plugins/api.c
211
@@ -XXX,XX +XXX,XX @@ void *qemu_plugin_insn_haddr(const struct qemu_plugin_insn *insn)
212
213
char *qemu_plugin_insn_disas(const struct qemu_plugin_insn *insn)
214
{
38
{
215
- CPUState *cpu = current_cpu;
39
- TempOptInfo *ti = ts_info(ts);
216
- return plugin_disas(cpu, insn->vaddr, insn->len);
40
- return ti->is_const && ti->val == val;
217
+ return plugin_disas(tcg_ctx->cpu, tcg_ctx->plugin_db,
41
+ return ti_is_const_val(ts_info(ts), val);
218
+ insn->vaddr, insn->len);
219
}
42
}
220
43
221
const char *qemu_plugin_insn_symbol(const struct qemu_plugin_insn *insn)
44
static inline bool arg_is_const(TCGArg arg)
222
--
45
--
223
2.34.1
46
2.43.0
224
225
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Sink mask computation below fold_affected_mask early exit.
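
A quick illustration of the "sign repetitions" argument kept in the comment
below (not part of the change; __builtin_clrsbll stands in for QEMU's
clrsb64()): AND cannot leave fewer redundant sign bits than the smaller of its
operands has, which is why intersecting the two left-aligned s_mask values is a
safe, conservative combination.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t x = (int64_t)0xffffff8000000001ull;   /* 24 redundant sign bits */
        int64_t y = (int64_t)0xfffffffff0000003ull;   /* 35 redundant sign bits */
        int64_t r = x & y;
        int cx = __builtin_clrsbll(x), cy = __builtin_clrsbll(y);

        assert(__builtin_clrsbll(r) >= (cx < cy ? cx : cy));
        return 0;
    }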
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 30 ++++++++++++++++--------------
8
1 file changed, 16 insertions(+), 14 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_add2(OptContext *ctx, TCGOp *op)
15
16
static bool fold_and(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1, z2;
19
+ uint64_t z1, z2, z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2_commutative(ctx, op) ||
23
fold_xi_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
- z2 = arg_info(op->args[2])->z_mask;
30
- ctx->z_mask = z1 & z2;
31
-
32
- /*
33
- * Sign repetitions are perforce all identical, whether they are 1 or 0.
34
- * Bitwise operations preserve the relative quantity of the repetitions.
35
- */
36
- ctx->s_mask = arg_info(op->args[1])->s_mask
37
- & arg_info(op->args[2])->s_mask;
38
+ t1 = arg_info(op->args[1]);
39
+ t2 = arg_info(op->args[2]);
40
+ z1 = t1->z_mask;
41
+ z2 = t2->z_mask;
42
43
/*
44
* Known-zeros does not imply known-ones. Therefore unless
45
* arg2 is constant, we can't infer affected bits from it.
46
*/
47
- if (arg_is_const(op->args[2]) &&
48
- fold_affected_mask(ctx, op, z1 & ~z2)) {
49
+ if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
50
return true;
51
}
52
53
- return fold_masks(ctx, op);
54
+ z_mask = z1 & z2;
55
+
56
+ /*
57
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
58
+ * Bitwise operations preserve the relative quantity of the repetitions.
59
+ */
60
+ s_mask = t1->s_mask & t2->s_mask;
61
+
62
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
63
}
64
65
static bool fold_andc(OptContext *ctx, TCGOp *op)
66
--
67
2.43.0
diff view generated by jsdifflib
1
Instead of returning a host pointer, copy the data into
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
storage provided by the caller.
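
As a migration note for out-of-tree plugins (a sketch mirroring the in-tree
execlog/howvec updates below, not an additional API change): the caller now
supplies the buffer and gets back how many bytes were actually copied.

    #include <qemu-plugin.h>

    /* Fetch up to the first four opcode bytes of an instruction. */
    static uint32_t first_word(const struct qemu_plugin_insn *insn)
    {
        uint32_t opcode = 0;
        size_t n = qemu_plugin_insn_data(insn, &opcode, sizeof(opcode));

        (void)n;   /* n is min(sizeof(opcode), qemu_plugin_insn_size(insn)) */
        return opcode;
    }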
2
Avoid double inversion of the value of second const operand.
3
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
include/qemu/qemu-plugin.h | 15 +++++++--------
7
tcg/optimize.c | 21 +++++++++++----------
8
contrib/plugins/execlog.c | 5 +++--
8
1 file changed, 11 insertions(+), 10 deletions(-)
9
contrib/plugins/howvec.c | 4 ++--
10
plugins/api.c | 7 +++++--
11
4 files changed, 17 insertions(+), 14 deletions(-)
12
9
13
diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/include/qemu/qemu-plugin.h
12
--- a/tcg/optimize.c
16
+++ b/include/qemu/qemu-plugin.h
13
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ typedef uint64_t qemu_plugin_id_t;
14
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
18
15
19
extern QEMU_PLUGIN_EXPORT int qemu_plugin_version;
16
static bool fold_andc(OptContext *ctx, TCGOp *op)
20
21
-#define QEMU_PLUGIN_VERSION 2
22
+#define QEMU_PLUGIN_VERSION 3
23
24
/**
25
* struct qemu_info_t - system information for plugins
26
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_insn *
27
qemu_plugin_tb_get_insn(const struct qemu_plugin_tb *tb, size_t idx);
28
29
/**
30
- * qemu_plugin_insn_data() - return ptr to instruction data
31
+ * qemu_plugin_insn_data() - copy instruction data
32
* @insn: opaque instruction handle from qemu_plugin_tb_get_insn()
33
+ * @dest: destination into which data is copied
34
+ * @len: length of dest
35
*
36
- * Note: data is only valid for duration of callback. See
37
- * qemu_plugin_insn_size() to calculate size of stream.
38
- *
39
- * Returns: pointer to a stream of bytes containing the value of this
40
- * instructions opcode.
41
+ * Returns the number of bytes copied, minimum of @len and insn size.
42
*/
43
QEMU_PLUGIN_API
44
-const void *qemu_plugin_insn_data(const struct qemu_plugin_insn *insn);
45
+size_t qemu_plugin_insn_data(const struct qemu_plugin_insn *insn,
46
+ void *dest, size_t len);
47
48
/**
49
* qemu_plugin_insn_size() - return size of instruction
50
diff --git a/contrib/plugins/execlog.c b/contrib/plugins/execlog.c
51
index XXXXXXX..XXXXXXX 100644
52
--- a/contrib/plugins/execlog.c
53
+++ b/contrib/plugins/execlog.c
54
@@ -XXX,XX +XXX,XX @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
55
NULL);
56
}
57
} else {
58
- uint32_t insn_opcode;
59
- insn_opcode = *((uint32_t *)qemu_plugin_insn_data(insn));
60
+ uint32_t insn_opcode = 0;
61
+ qemu_plugin_insn_data(insn, &insn_opcode, sizeof(insn_opcode));
62
+
63
char *output = g_strdup_printf("0x%"PRIx64", 0x%"PRIx32", \"%s\"",
64
insn_vaddr, insn_opcode, insn_disas);
65
66
diff --git a/contrib/plugins/howvec.c b/contrib/plugins/howvec.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/contrib/plugins/howvec.c
69
+++ b/contrib/plugins/howvec.c
70
@@ -XXX,XX +XXX,XX @@ static struct qemu_plugin_scoreboard *find_counter(
71
{
17
{
72
int i;
18
- uint64_t z1;
73
uint64_t *cnt = NULL;
19
+ uint64_t z_mask, s_mask;
74
- uint32_t opcode;
20
+ TempOptInfo *t1, *t2;
75
+ uint32_t opcode = 0;
21
76
InsnClassExecCount *class = NULL;
22
if (fold_const2(ctx, op) ||
23
fold_xx_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ t2 = arg_info(op->args[2]);
31
+ z_mask = t1->z_mask;
77
32
78
/*
33
/*
79
@@ -XXX,XX +XXX,XX @@ static struct qemu_plugin_scoreboard *find_counter(
34
* Known-zeros does not imply known-ones. Therefore unless
80
* They would probably benefit from a more tailored plugin.
35
* arg2 is constant, we can't infer anything from it.
81
* However we can fall back to individual instruction counting.
82
*/
36
*/
83
- opcode = *((uint32_t *)qemu_plugin_insn_data(insn));
37
- if (arg_is_const(op->args[2])) {
84
+ qemu_plugin_insn_data(insn, &opcode, sizeof(opcode));
38
- uint64_t z2 = ~arg_info(op->args[2])->z_mask;
85
39
- if (fold_affected_mask(ctx, op, z1 & ~z2)) {
86
for (i = 0; !cnt && i < class_table_sz; i++) {
40
+ if (ti_is_const(t2)) {
87
class = &class_table[i];
41
+ uint64_t v2 = ti_const_val(t2);
88
diff --git a/plugins/api.c b/plugins/api.c
42
+ if (fold_affected_mask(ctx, op, z_mask & v2)) {
89
index XXXXXXX..XXXXXXX 100644
43
return true;
90
--- a/plugins/api.c
44
}
91
+++ b/plugins/api.c
45
- z1 &= z2;
92
@@ -XXX,XX +XXX,XX @@ qemu_plugin_tb_get_insn(const struct qemu_plugin_tb *tb, size_t idx)
46
+ z_mask &= ~v2;
93
* instruction being translated.
47
}
94
*/
48
- ctx->z_mask = z1;
95
49
96
-const void *qemu_plugin_insn_data(const struct qemu_plugin_insn *insn)
50
- ctx->s_mask = arg_info(op->args[1])->s_mask
97
+size_t qemu_plugin_insn_data(const struct qemu_plugin_insn *insn,
51
- & arg_info(op->args[2])->s_mask;
98
+ void *dest, size_t len)
52
- return fold_masks(ctx, op);
99
{
53
+ s_mask = t1->s_mask & t2->s_mask;
100
- return insn->data->data;
54
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
101
+ len = MIN(len, insn->data->len);
102
+ memcpy(dest, insn->data->data, len);
103
+ return len;
104
}
55
}
105
56
106
size_t qemu_plugin_insn_size(const struct qemu_plugin_insn *insn)
57
static bool fold_brcond(OptContext *ctx, TCGOp *op)
107
--
58
--
108
2.34.1
59
2.43.0
109
110
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Always set s_mask along the BSWAP_OS path, since the result is
3
being explicitly sign-extended.
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 21 ++++++++++-----------
9
1 file changed, 10 insertions(+), 11 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
16
static bool fold_bswap(OptContext *ctx, TCGOp *op)
17
{
18
uint64_t z_mask, s_mask, sign;
19
+ TempOptInfo *t1 = arg_info(op->args[1]);
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t = arg_info(op->args[1])->val;
23
-
24
- t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
25
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
26
+ if (ti_is_const(t1)) {
27
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
28
+ do_constant_folding(op->opc, ctx->type,
29
+ ti_const_val(t1),
30
+ op->args[2]));
31
}
32
33
- z_mask = arg_info(op->args[1])->z_mask;
34
-
35
+ z_mask = t1->z_mask;
36
switch (op->opc) {
37
case INDEX_op_bswap16_i32:
38
case INDEX_op_bswap16_i64:
39
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
40
/* If the sign bit may be 1, force all the bits above to 1. */
41
if (z_mask & sign) {
42
z_mask |= sign;
43
- s_mask = sign << 1;
44
}
45
+ /* The value and therefore s_mask is explicitly sign-extended. */
46
+ s_mask = sign;
47
break;
48
default:
49
/* The high bits are undefined: force all bits above the sign to 1. */
50
z_mask |= sign << 1;
51
break;
52
}
53
- ctx->z_mask = z_mask;
54
- ctx->s_mask = s_mask;
55
56
- return fold_masks(ctx, op);
57
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
58
}
59
60
static bool fold_call(OptContext *ctx, TCGOp *op)
61
--
62
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Compute s_mask from the union of the maximum count and the
3
op2 fallback for op1 being zero.
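
To spell the masks out (illustration only, not part of the change): a 64-bit
clz/ctz result lies in [0, 64], so only the low seven bits of the count can
ever be set and every higher bit is a known copy of the zero sign bit; the op2
fallback for a zero input is then folded in with the & below.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t z_mask = 64 | 63;        /* 0x7f: bits a count of 0..64 may use */
        uint64_t s_mask = ~z_mask;        /* bits guaranteed to repeat the (zero) sign */

        for (uint64_t count = 0; count <= 64; count++) {
            assert((count & ~z_mask) == 0);
            assert((count & s_mask) == 0);
        }
        return 0;
    }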
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 15 ++++++++++-----
9
1 file changed, 10 insertions(+), 5 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
16
17
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t z_mask;
20
+ uint64_t z_mask, s_mask;
21
+ TempOptInfo *t1 = arg_info(op->args[1]);
22
+ TempOptInfo *t2 = arg_info(op->args[2]);
23
24
- if (arg_is_const(op->args[1])) {
25
- uint64_t t = arg_info(op->args[1])->val;
26
+ if (ti_is_const(t1)) {
27
+ uint64_t t = ti_const_val(t1);
28
29
if (t != 0) {
30
t = do_constant_folding(op->opc, ctx->type, t, 0);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
32
default:
33
g_assert_not_reached();
34
}
35
- ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
36
- return false;
37
+ s_mask = ~z_mask;
38
+ z_mask |= t2->z_mask;
39
+ s_mask &= t2->s_mask;
40
+
41
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
42
}
43
44
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
diff view generated by jsdifflib
New patch
1
Add fold_masks_z as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
15
return true;
16
}
17
18
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, z_mask, 0);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
27
28
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask;
31
+
32
if (fold_const1(ctx, op)) {
33
return true;
34
}
35
36
switch (ctx->type) {
37
case TCG_TYPE_I32:
38
- ctx->z_mask = 32 | 31;
39
+ z_mask = 32 | 31;
40
break;
41
case TCG_TYPE_I64:
42
- ctx->z_mask = 64 | 63;
43
+ z_mask = 64 | 63;
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_z(ctx, op, z_mask);
50
}
51
52
static bool fold_deposit(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
When we fold to and, use fold_and.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 35 +++++++++++++++++------------------
8
1 file changed, 17 insertions(+), 18 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
15
16
static bool fold_deposit(OptContext *ctx, TCGOp *op)
17
{
18
+ TempOptInfo *t1 = arg_info(op->args[1]);
19
+ TempOptInfo *t2 = arg_info(op->args[2]);
20
+ int ofs = op->args[3];
21
+ int len = op->args[4];
22
TCGOpcode and_opc;
23
+ uint64_t z_mask;
24
25
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
26
- uint64_t t1 = arg_info(op->args[1])->val;
27
- uint64_t t2 = arg_info(op->args[2])->val;
28
-
29
- t1 = deposit64(t1, op->args[3], op->args[4], t2);
30
- return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
31
+ if (ti_is_const(t1) && ti_is_const(t2)) {
32
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
33
+ deposit64(ti_const_val(t1), ofs, len,
34
+ ti_const_val(t2)));
35
}
36
37
switch (ctx->type) {
38
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
39
}
40
41
/* Inserting a value into zero at offset 0. */
42
- if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
43
- uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
44
+ if (ti_is_const_val(t1, 0) && ofs == 0) {
45
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
46
47
op->opc = and_opc;
48
op->args[1] = op->args[2];
49
op->args[2] = arg_new_constant(ctx, mask);
50
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
51
- return false;
52
+ return fold_and(ctx, op);
53
}
54
55
/* Inserting zero into a value. */
56
- if (arg_is_const_val(op->args[2], 0)) {
57
- uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
58
+ if (ti_is_const_val(t2, 0)) {
59
+ uint64_t mask = deposit64(-1, ofs, len, 0);
60
61
op->opc = and_opc;
62
op->args[2] = arg_new_constant(ctx, mask);
63
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
64
- return false;
65
+ return fold_and(ctx, op);
66
}
67
68
- ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
69
- op->args[3], op->args[4],
70
- arg_info(op->args[2])->z_mask);
71
- return false;
72
+ z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
73
+ return fold_masks_z(ctx, op, z_mask);
74
}
75
76
static bool fold_divide(OptContext *ctx, TCGOp *op)
77
--
78
2.43.0
diff view generated by jsdifflib
New patch
1
The input which overlaps the sign bit of the output can
2
have its input s_mask propagated to the output s_mask.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 14 ++++++++++++--
8
1 file changed, 12 insertions(+), 2 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
15
TempOptInfo *t2 = arg_info(op->args[2]);
16
int ofs = op->args[3];
17
int len = op->args[4];
18
+ int width;
19
TCGOpcode and_opc;
20
- uint64_t z_mask;
21
+ uint64_t z_mask, s_mask;
22
23
if (ti_is_const(t1) && ti_is_const(t2)) {
24
return tcg_opt_gen_movi(ctx, op, op->args[0],
25
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
26
switch (ctx->type) {
27
case TCG_TYPE_I32:
28
and_opc = INDEX_op_and_i32;
29
+ width = 32;
30
break;
31
case TCG_TYPE_I64:
32
and_opc = INDEX_op_and_i64;
33
+ width = 64;
34
break;
35
default:
36
g_assert_not_reached();
37
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
38
return fold_and(ctx, op);
39
}
40
41
+ /* The s_mask from the top portion of the deposit is still valid. */
42
+ if (ofs + len == width) {
43
+ s_mask = t2->s_mask << ofs;
44
+ } else {
45
+ s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
46
+ }
47
+
48
z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
49
- return fold_masks_z(ctx, op, z_mask);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_divide(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 4 ++--
5
1 file changed, 2 insertions(+), 2 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
12
t = dup_const(TCGOP_VECE(op), t);
13
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup2(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
21
op->opc = INDEX_op_dup_vec;
22
TCGOP_VECE(op) = MO_32;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
26
}
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
--
30
2.43.0
diff view generated by jsdifflib
New patch
1
Add fold_masks_s as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
15
return fold_masks_zs(ctx, op, z_mask, 0);
16
}
17
18
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, -1, s_mask);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t s_mask;
31
+
32
if (fold_const2_commutative(ctx, op) ||
33
fold_xi_to_x(ctx, op, -1) ||
34
fold_xi_to_not(ctx, op, 0)) {
35
return true;
36
}
37
38
- ctx->s_mask = arg_info(op->args[1])->s_mask
39
- & arg_info(op->args[2])->s_mask;
40
- return false;
41
+ s_mask = arg_info(op->args[1])->s_mask
42
+ & arg_info(op->args[2])->s_mask;
43
+ return fold_masks_s(ctx, op, s_mask);
44
}
45
46
static bool fold_extract(OptContext *ctx, TCGOp *op)
47
--
48
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 15 ++++++---------
7
1 file changed, 6 insertions(+), 9 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
14
static bool fold_extract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask_old, z_mask;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = extract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ extract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask_old = arg_info(op->args[1])->z_mask;
33
+ z_mask_old = t1->z_mask;
34
z_mask = extract64(z_mask_old, pos, len);
35
if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
36
return true;
37
}
38
- ctx->z_mask = z_mask;
39
40
- return fold_masks(ctx, op);
41
+ return fold_masks_z(ctx, op, z_mask);
42
}
43
44
static bool fold_extract2(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
12
}
13
return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_exts(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Explicitly sign-extend z_mask instead of doing that manually.
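The replacement works because a narrowing cast through the signed type truncates and sign-extends in one step. A standalone demonstration, independent of the QEMU tree (the narrowing conversion is implementation-defined in ISO C, and like the patch this assumes the usual two's-complement behaviour):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t z_mask = 0x12345678900000f0ull;   /* bit 7 of the low byte set */

        /* Previous style: truncate, then widen the sign bit by hand. */
        uint64_t manual = (uint8_t)z_mask;
        if (manual & 0x80) {
            manual |= ~(uint64_t)0xff;
        }

        /* New style: the signed cast does both steps at once. */
        uint64_t cast = (uint64_t)(int8_t)z_mask;

        /* Both print fffffffffffffff0. */
        printf("manual=%016" PRIx64 "\ncast  =%016" PRIx64 "\n", manual, cast);
        return manual == cast ? 0 : 1;
    }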
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 29 ++++++++++++-----------------
8
1 file changed, 12 insertions(+), 17 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
15
16
static bool fold_exts(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t s_mask_old, s_mask, z_mask, sign;
19
+ uint64_t s_mask_old, s_mask, z_mask;
20
bool type_change = false;
21
+ TempOptInfo *t1;
22
23
if (fold_const1(ctx, op)) {
24
return true;
25
}
26
27
- z_mask = arg_info(op->args[1])->z_mask;
28
- s_mask = arg_info(op->args[1])->s_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ z_mask = t1->z_mask;
31
+ s_mask = t1->s_mask;
32
s_mask_old = s_mask;
33
34
switch (op->opc) {
35
CASE_OP_32_64(ext8s):
36
- sign = INT8_MIN;
37
- z_mask = (uint8_t)z_mask;
38
+ s_mask |= INT8_MIN;
39
+ z_mask = (int8_t)z_mask;
40
break;
41
CASE_OP_32_64(ext16s):
42
- sign = INT16_MIN;
43
- z_mask = (uint16_t)z_mask;
44
+ s_mask |= INT16_MIN;
45
+ z_mask = (int16_t)z_mask;
46
break;
47
case INDEX_op_ext_i32_i64:
48
type_change = true;
49
QEMU_FALLTHROUGH;
50
case INDEX_op_ext32s_i64:
51
- sign = INT32_MIN;
52
- z_mask = (uint32_t)z_mask;
53
+ s_mask |= INT32_MIN;
54
+ z_mask = (int32_t)z_mask;
55
break;
56
default:
57
g_assert_not_reached();
58
}
59
60
- if (z_mask & sign) {
61
- z_mask |= sign;
62
- }
63
- s_mask |= sign << 1;
64
-
65
- ctx->z_mask = z_mask;
66
- ctx->s_mask = s_mask;
67
if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
68
return true;
69
}
70
71
- return fold_masks(ctx, op);
72
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
73
}
74
75
static bool fold_extu(OptContext *ctx, TCGOp *op)
76
--
77
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 4 ++--
7
1 file changed, 2 insertions(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
14
g_assert_not_reached();
15
}
16
17
- ctx->z_mask = z_mask;
18
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
19
return true;
20
}
21
- return fold_masks(ctx, op);
22
+
23
+ return fold_masks_z(ctx, op, z_mask);
24
}
25
26
static bool fold_mb(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 19 +++++++++++--------
7
1 file changed, 11 insertions(+), 8 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
14
15
static bool fold_movcond(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *tt, *ft;
19
int i;
20
21
/* If true and false values are the same, eliminate the cmp. */
22
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
23
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
24
}
25
26
- ctx->z_mask = arg_info(op->args[3])->z_mask
27
- | arg_info(op->args[4])->z_mask;
28
- ctx->s_mask = arg_info(op->args[3])->s_mask
29
- & arg_info(op->args[4])->s_mask;
30
+ tt = arg_info(op->args[3]);
31
+ ft = arg_info(op->args[4]);
32
+ z_mask = tt->z_mask | ft->z_mask;
33
+ s_mask = tt->s_mask & ft->s_mask;
34
35
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
36
- uint64_t tv = arg_info(op->args[3])->val;
37
- uint64_t fv = arg_info(op->args[4])->val;
38
+ if (ti_is_const(tt) && ti_is_const(ft)) {
39
+ uint64_t tv = ti_const_val(tt);
40
+ uint64_t fv = ti_const_val(ft);
41
TCGOpcode opc, negopc = 0;
42
TCGCond cond = op->args[5];
43
44
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
45
}
46
}
47
}
48
- return false;
49
+
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_mul(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
diff view generated by jsdifflib
1
TCG register spill/fill uses tcg_out_ld/st with all types,
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
not necessarily going through INDEX_op_{ld,st}_vec.
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 6 +++---
5
1 file changed, 3 insertions(+), 3 deletions(-)
3
6
4
Cc: qemu-stable@nongnu.org
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
5
Fixes: 16288ded944 ("tcg/loongarch64: Lower basic tcg vec ops to LSX")
6
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2336
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Song Gao <gaosong@loongson.cn>
9
Tested-by: Song Gao <gaosong@loongson.cn>
10
---
11
tcg/loongarch64/tcg-target.c.inc | 103 ++++++++++++++++++++++++-------
12
1 file changed, 80 insertions(+), 23 deletions(-)
13
14
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
15
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/loongarch64/tcg-target.c.inc
9
--- a/tcg/optimize.c
17
+++ b/tcg/loongarch64/tcg-target.c.inc
10
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
11
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
19
}
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
20
}
17
}
21
18
22
-static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
19
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
23
- TCGReg arg1, intptr_t arg2)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
24
+static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg dest,
21
fold_xi_to_i(ctx, op, 0)) {
25
+ TCGReg base, intptr_t offset)
22
return true;
26
{
23
}
27
- bool is_32bit = type == TCG_TYPE_I32;
24
- return false;
28
- tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2);
25
+ return finish_folding(ctx, op);
29
+ switch (type) {
30
+ case TCG_TYPE_I32:
31
+ if (dest < TCG_REG_V0) {
32
+ tcg_out_ldst(s, OPC_LD_W, dest, base, offset);
33
+ } else {
34
+ tcg_out_dupm_vec(s, TCG_TYPE_I128, MO_32, dest, base, offset);
35
+ }
36
+ break;
37
+ case TCG_TYPE_I64:
38
+ if (dest < TCG_REG_V0) {
39
+ tcg_out_ldst(s, OPC_LD_D, dest, base, offset);
40
+ } else {
41
+ tcg_out_dupm_vec(s, TCG_TYPE_I128, MO_64, dest, base, offset);
42
+ }
43
+ break;
44
+ case TCG_TYPE_V128:
45
+ if (-0x800 <= offset && offset <= 0x7ff) {
46
+ tcg_out_opc_vld(s, dest, base, offset);
47
+ } else {
48
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
49
+ tcg_out_opc_vldx(s, dest, base, TCG_REG_TMP0);
50
+ }
51
+ break;
52
+ default:
53
+ g_assert_not_reached();
54
+ }
55
}
26
}
56
27
57
-static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
28
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
58
- TCGReg arg1, intptr_t arg2)
29
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
59
+static void tcg_out_st(TCGContext *s, TCGType type, TCGReg src,
30
tcg_opt_gen_movi(ctx, op2, rh, h);
60
+ TCGReg base, intptr_t offset)
31
return true;
61
{
32
}
62
- bool is_32bit = type == TCG_TYPE_I32;
33
- return false;
63
- tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2);
34
+ return finish_folding(ctx, op);
64
+ switch (type) {
65
+ case TCG_TYPE_I32:
66
+ if (src < TCG_REG_V0) {
67
+ tcg_out_ldst(s, OPC_ST_W, src, base, offset);
68
+ } else {
69
+ /* TODO: Could use fst_s, fstx_s */
70
+ if (offset < -0x100 || offset > 0xff || (offset & 3)) {
71
+ if (-0x800 <= offset && offset <= 0x7ff) {
72
+ tcg_out_opc_addi_d(s, TCG_REG_TMP0, base, offset);
73
+ } else {
74
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
75
+ tcg_out_opc_add_d(s, TCG_REG_TMP0, TCG_REG_TMP0, base);
76
+ }
77
+ base = TCG_REG_TMP0;
78
+ offset = 0;
79
+ }
80
+ tcg_out_opc_vstelm_w(s, src, base, offset, 0);
81
+ }
82
+ break;
83
+ case TCG_TYPE_I64:
84
+ if (src < TCG_REG_V0) {
85
+ tcg_out_ldst(s, OPC_ST_D, src, base, offset);
86
+ } else {
87
+ /* TODO: Could use fst_d, fstx_d */
88
+ if (offset < -0x100 || offset > 0xff || (offset & 7)) {
89
+ if (-0x800 <= offset && offset <= 0x7ff) {
90
+ tcg_out_opc_addi_d(s, TCG_REG_TMP0, base, offset);
91
+ } else {
92
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
93
+ tcg_out_opc_add_d(s, TCG_REG_TMP0, TCG_REG_TMP0, base);
94
+ }
95
+ base = TCG_REG_TMP0;
96
+ offset = 0;
97
+ }
98
+ tcg_out_opc_vstelm_d(s, src, base, offset, 0);
99
+ }
100
+ break;
101
+ case TCG_TYPE_V128:
102
+ if (-0x800 <= offset && offset <= 0x7ff) {
103
+ tcg_out_opc_vst(s, src, base, offset);
104
+ } else {
105
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
106
+ tcg_out_opc_vstx(s, src, base, TCG_REG_TMP0);
107
+ }
108
+ break;
109
+ default:
110
+ g_assert_not_reached();
111
+ }
112
}
35
}
113
36
114
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
37
static bool fold_nand(OptContext *ctx, TCGOp *op)
115
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
116
{
117
TCGType type = vecl + TCG_TYPE_V64;
118
TCGArg a0, a1, a2, a3;
119
- TCGReg temp = TCG_REG_TMP0;
120
TCGReg temp_vec = TCG_VEC_TMP0;
121
122
static const LoongArchInsn cmp_vec_insn[16][4] = {
123
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
124
125
switch (opc) {
126
case INDEX_op_st_vec:
127
- /* Try to fit vst imm */
128
- if (-0x800 <= a2 && a2 <= 0x7ff) {
129
- tcg_out_opc_vst(s, a0, a1, a2);
130
- } else {
131
- tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
132
- tcg_out_opc_vstx(s, a0, a1, temp);
133
- }
134
+ tcg_out_st(s, type, a0, a1, a2);
135
break;
136
case INDEX_op_ld_vec:
137
- /* Try to fit vld imm */
138
- if (-0x800 <= a2 && a2 <= 0x7ff) {
139
- tcg_out_opc_vld(s, a0, a1, a2);
140
- } else {
141
- tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
142
- tcg_out_opc_vldx(s, a0, a1, temp);
143
- }
144
+ tcg_out_ld(s, type, a0, a1, a2);
145
break;
146
case INDEX_op_and_vec:
147
tcg_out_opc_vand_v(s, a0, a1, a2);
148
--
38
--
149
2.34.1
39
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nand(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, -1)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 9 ++-------
7
1 file changed, 2 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
14
{
15
/* Set to 1 all bits to the left of the rightmost. */
16
uint64_t z_mask = arg_info(op->args[1])->z_mask;
17
- ctx->z_mask = -(z_mask & -z_mask);
18
+ z_mask = -(z_mask & -z_mask);
19
20
- /*
21
- * Because of fold_sub_to_neg, we want to always return true,
22
- * via finish_folding.
23
- */
24
- finish_folding(ctx, op);
25
- return true;
26
+ return fold_masks_z(ctx, op, z_mask);
27
}
28
29
static bool fold_neg(OptContext *ctx, TCGOp *op)
30
--
31
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nor(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, 0)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_not(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 7 +------
7
1 file changed, 1 insertion(+), 6 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
if (fold_const1(ctx, op)) {
15
return true;
16
}
17
-
18
- ctx->s_mask = arg_info(op->args[1])->s_mask;
19
-
20
- /* Because of fold_to_not, we want to always return true, via finish. */
21
- finish_folding(ctx, op);
22
- return true;
23
+ return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
24
}
25
26
static bool fold_or(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 ++++++++-----
7
1 file changed, 8 insertions(+), 5 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
15
static bool fold_or(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *t1, *t2;
19
+
20
if (fold_const2_commutative(ctx, op) ||
21
fold_xi_to_x(ctx, op, 0) ||
22
fold_xx_to_x(ctx, op)) {
23
return true;
24
}
25
26
- ctx->z_mask = arg_info(op->args[1])->z_mask
27
- | arg_info(op->args[2])->z_mask;
28
- ctx->s_mask = arg_info(op->args[1])->s_mask
29
- & arg_info(op->args[2])->s_mask;
30
- return fold_masks(ctx, op);
31
+ t1 = arg_info(op->args[1]);
32
+ t2 = arg_info(op->args[2]);
33
+ z_mask = t1->z_mask | t2->z_mask;
34
+ s_mask = t1->s_mask & t2->s_mask;
35
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
36
}
37
38
static bool fold_orc(OptContext *ctx, TCGOp *op)
39
--
40
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
14
15
static bool fold_orc(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2(ctx, op) ||
20
fold_xx_to_i(ctx, op, -1) ||
21
fold_xi_to_x(ctx, op, -1) ||
22
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
23
return true;
24
}
25
26
- ctx->s_mask = arg_info(op->args[1])->s_mask
27
- & arg_info(op->args[2])->s_mask;
28
- return false;
29
+ s_mask = arg_info(op->args[1])->s_mask
30
+ & arg_info(op->args[2])->s_mask;
31
+ return fold_masks_s(ctx, op, s_mask);
32
}
33
34
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
35
--
36
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Be careful not to call fold_masks_zs when the memory operation
4
is wide enough to require multiple outputs, so split into two
5
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.
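A load that fits in one register has a single 64-bit output, which is what one zero/sign mask pair can describe. A standalone sketch of the values computed on that path (MAKE_64BIT_MASK is re-derived locally rather than taken from the QEMU headers):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local equivalent of MAKE_64BIT_MASK(start, len). */
    static uint64_t make_mask(int start, int len)
    {
        return (len >= 64 ? ~(uint64_t)0 : ((uint64_t)1 << len) - 1) << start;
    }

    int main(void)
    {
        for (int width = 8; width <= 32; width *= 2) {
            /* MO_SIGN load: bits width-1..63 all repeat the loaded sign bit. */
            uint64_t s_mask = make_mask(width - 1, 64 - (width - 1));
            /* Unsigned load: only the low 'width' bits can be nonzero. */
            uint64_t z_mask = make_mask(0, width);
            printf("width %2d: s_mask=%016" PRIx64 " z_mask=%016" PRIx64 "\n",
                   width, s_mask, z_mask);
        }
        /* A 128-bit load produces two output registers, so one pair of masks
           cannot describe it; that path only clears prev_mb and finishes. */
        return 0;
    }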
6
7
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
10
tcg/optimize.c | 26 +++++++++++++++++++++-----
11
1 file changed, 21 insertions(+), 5 deletions(-)
12
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/optimize.c
16
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
18
return fold_masks_s(ctx, op, s_mask);
19
}
20
21
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
22
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
23
{
24
const TCGOpDef *def = &tcg_op_defs[op->opc];
25
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
26
MemOp mop = get_memop(oi);
27
int width = 8 * memop_size(mop);
28
+ uint64_t z_mask = -1, s_mask = 0;
29
30
if (width < 64) {
31
if (mop & MO_SIGN) {
32
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
33
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
34
} else {
35
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
36
+ z_mask = MAKE_64BIT_MASK(0, width);
37
}
38
}
39
40
/* Opcodes that touch guest memory stop the mb optimization. */
41
ctx->prev_mb = NULL;
42
- return false;
43
+
44
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
45
+}
46
+
47
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
48
+{
49
+ /* Opcodes that touch guest memory stop the mb optimization. */
50
+ ctx->prev_mb = NULL;
51
+ return finish_folding(ctx, op);
52
}
53
54
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
55
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
56
break;
57
case INDEX_op_qemu_ld_a32_i32:
58
case INDEX_op_qemu_ld_a64_i32:
59
+ done = fold_qemu_ld_1reg(&ctx, op);
60
+ break;
61
case INDEX_op_qemu_ld_a32_i64:
62
case INDEX_op_qemu_ld_a64_i64:
63
+ if (TCG_TARGET_REG_BITS == 64) {
64
+ done = fold_qemu_ld_1reg(&ctx, op);
65
+ break;
66
+ }
67
+ QEMU_FALLTHROUGH;
68
case INDEX_op_qemu_ld_a32_i128:
69
case INDEX_op_qemu_ld_a64_i128:
70
- done = fold_qemu_ld(&ctx, op);
71
+ done = fold_qemu_ld_2reg(&ctx, op);
72
break;
73
case INDEX_op_qemu_st8_a32_i32:
74
case INDEX_op_qemu_st8_a64_i32:
75
--
76
2.43.0
New patch
1
Stores have no output operands, and so need no further work.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 11 +++++------
7
1 file changed, 5 insertions(+), 6 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
14
{
15
/* Opcodes that touch guest memory stop the mb optimization. */
16
ctx->prev_mb = NULL;
17
- return false;
18
+ return true;
19
}
20
21
static bool fold_remainder(OptContext *ctx, TCGOp *op)
22
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
23
24
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
25
remove_mem_copy_all(ctx);
26
- return false;
27
+ return true;
28
}
29
30
switch (op->opc) {
31
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
32
g_assert_not_reached();
33
}
34
remove_mem_copy_in(ctx, ofs, ofs + lm1);
35
- return false;
36
+ return true;
37
}
38
39
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
40
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
41
TCGType type;
42
43
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
44
- fold_tcg_st(ctx, op);
45
- return false;
46
+ return fold_tcg_st(ctx, op);
47
}
48
49
src = arg_temp(op->args[0]);
50
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
51
last = ofs + tcg_type_size(type) - 1;
52
remove_mem_copy_in(ctx, ofs, last);
53
record_mem_copy(ctx, type, src, ofs, last);
54
- return false;
55
+ return true;
56
}
57
58
static bool fold_xor(OptContext *ctx, TCGOp *op)
59
--
60
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
12
fold_xx_to_i(ctx, op, 0)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
20
--
21
2.43.0
New patch
1
Change return from bool to int; distinguish between
2
complete folding, simplification, and no change.
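Seen from the caller, only the "no change" result should fall through to the test-under-power-of-two rewrite. A minimal sketch of the convention with hypothetical stand-ins (try_zmask and try_tst_pow2 are illustrative names, not the QEMU functions):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for fold_setcond_zmask / fold_setcond_tst_pow2. */
    static int try_zmask(int outcome) { return outcome; }
    static void try_tst_pow2(void) { puts("  trying tst/pow2 rewrite"); }

    static bool setcond_like(int outcome)
    {
        int i = try_zmask(outcome);
        if (i > 0) {
            return true;            /* 1: completely folded away */
        }
        if (i == 0) {
            try_tst_pow2();         /* 0: unchanged, other rewrites may apply */
        }
        /* -1: already simplified in place; skip conflicting rewrites */
        return false;
    }

    int main(void)
    {
        for (int outcome = -1; outcome <= 1; outcome++) {
            printf("outcome %+d -> %s\n", outcome,
                   setcond_like(outcome) ? "done" : "continue");
        }
        return 0;
    }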
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 22 ++++++++++++++--------
8
1 file changed, 14 insertions(+), 8 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
15
return finish_folding(ctx, op);
16
}
17
18
-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
19
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
20
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
21
{
22
uint64_t a_zmask, b_val;
23
TCGCond cond;
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
25
op->opc = xor_opc;
26
op->args[2] = arg_new_constant(ctx, 1);
27
}
28
- return false;
29
+ return -1;
30
}
31
}
32
-
33
- return false;
34
+ return 0;
35
}
36
37
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
39
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
40
}
41
42
- if (fold_setcond_zmask(ctx, op, false)) {
43
+ i = fold_setcond_zmask(ctx, op, false);
44
+ if (i > 0) {
45
return true;
46
}
47
- fold_setcond_tst_pow2(ctx, op, false);
48
+ if (i == 0) {
49
+ fold_setcond_tst_pow2(ctx, op, false);
50
+ }
51
52
ctx->z_mask = 1;
53
return false;
54
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
55
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
56
}
57
58
- if (fold_setcond_zmask(ctx, op, true)) {
59
+ i = fold_setcond_zmask(ctx, op, true);
60
+ if (i > 0) {
61
return true;
62
}
63
- fold_setcond_tst_pow2(ctx, op, true);
64
+ if (i == 0) {
65
+ fold_setcond_tst_pow2(ctx, op, true);
66
+ }
67
68
/* Value is {0,-1} so all bits are repetitions of the sign. */
69
ctx->s_mask = -1;
70
--
71
2.43.0
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Avoid the use of the OptContext slots.
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
target/xtensa/translate.c | 3 +--
6
tcg/optimize.c | 3 +--
5
1 file changed, 1 insertion(+), 2 deletions(-)
7
1 file changed, 1 insertion(+), 2 deletions(-)
6
8
7
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
9
--- a/target/xtensa/translate.c
11
--- a/tcg/optimize.c
10
+++ b/target/xtensa/translate.c
12
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
12
#include "tcg/tcg-op.h"
14
fold_setcond_tst_pow2(ctx, op, false);
13
#include "qemu/log.h"
15
}
14
#include "qemu/qemu-print.h"
16
15
-#include "exec/cpu_ldst.h"
17
- ctx->z_mask = 1;
16
#include "semihosting/semihost.h"
18
- return false;
17
#include "exec/translator.h"
19
+ return fold_masks_z(ctx, op, 1);
18
19
@@ -XXX,XX +XXX,XX @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
20
21
static inline unsigned xtensa_insn_len(CPUXtensaState *env, DisasContext *dc)
22
{
23
- uint8_t b0 = cpu_ldub_code(env, dc->pc);
24
+ uint8_t b0 = translator_ldub(env, &dc->base, dc->pc);
25
return xtensa_op0_insn_len(dc, b0);
26
}
20
}
27
21
22
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
28
--
23
--
29
2.34.1
24
2.43.0
30
31
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Avoid the use of the OptContext slots.
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
target/avr/translate.c | 3 +--
6
tcg/optimize.c | 3 +--
5
1 file changed, 1 insertion(+), 2 deletions(-)
7
1 file changed, 1 insertion(+), 2 deletions(-)
6
8
7
diff --git a/target/avr/translate.c b/target/avr/translate.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
9
--- a/target/avr/translate.c
11
--- a/tcg/optimize.c
10
+++ b/target/avr/translate.c
12
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@
13
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
12
#include "cpu.h"
14
}
13
#include "exec/exec-all.h"
15
14
#include "tcg/tcg-op.h"
16
/* Value is {0,-1} so all bits are repetitions of the sign. */
15
-#include "exec/cpu_ldst.h"
17
- ctx->s_mask = -1;
16
#include "exec/helper-proto.h"
18
- return false;
17
#include "exec/helper-gen.h"
19
+ return fold_masks_s(ctx, op, -1);
18
#include "exec/log.h"
19
@@ -XXX,XX +XXX,XX @@ static int to_regs_00_30_by_two(DisasContext *ctx, int indx)
20
21
static uint16_t next_word(DisasContext *ctx)
22
{
23
- return cpu_lduw_code(ctx->env, ctx->npc++ * 2);
24
+ return translator_lduw(ctx->env, &ctx->base, ctx->npc++ * 2);
25
}
20
}
26
21
27
static int append_16(DisasContext *ctx, int x)
22
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
28
--
23
--
29
2.34.1
24
2.43.0
30
31
1
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
1
Avoid the use of the OptContext slots.
2
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
---
5
target/microblaze/translate.c | 3 +--
6
tcg/optimize.c | 3 +--
6
1 file changed, 1 insertion(+), 2 deletions(-)
7
1 file changed, 1 insertion(+), 2 deletions(-)
7
8
8
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/target/microblaze/translate.c
11
--- a/tcg/optimize.c
11
+++ b/target/microblaze/translate.c
12
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
13
#include "tcg/tcg-op.h"
14
return fold_setcond(ctx, op);
14
#include "exec/helper-proto.h"
15
#include "exec/helper-gen.h"
16
-#include "exec/cpu_ldst.h"
17
#include "exec/translator.h"
18
#include "qemu/qemu-print.h"
19
20
@@ -XXX,XX +XXX,XX @@ static void mb_tr_translate_insn(DisasContextBase *dcb, CPUState *cs)
21
22
dc->tb_flags_to_set = 0;
23
24
- ir = cpu_ldl_code(cpu_env(cs), dc->base.pc_next);
25
+ ir = translator_ldl(cpu_env(cs), &dc->base, dc->base.pc_next);
26
if (!decode(dc, ir)) {
27
trap_illegal(dc, true);
28
}
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
21
do_setcond_const:
22
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
29
--
23
--
30
2.34.1
24
2.43.0
31
32
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
13
op->args[3] = tcg_swap_cond(op->args[3]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
13
op->args[5] = tcg_invert_cond(op->args[5]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_sextract(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 24 +++++++++---------------
7
1 file changed, 9 insertions(+), 15 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
14
static bool fold_sextract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask, s_mask, s_mask_old;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = sextract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ sextract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask = arg_info(op->args[1])->z_mask;
33
- z_mask = sextract64(z_mask, pos, len);
34
- ctx->z_mask = z_mask;
35
-
36
- s_mask_old = arg_info(op->args[1])->s_mask;
37
- s_mask = sextract64(s_mask_old, pos, len);
38
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
39
- ctx->s_mask = s_mask;
40
+ s_mask_old = t1->s_mask;
41
+ s_mask = s_mask_old >> pos;
42
+ s_mask |= -1ull << (len - 1);
43
44
if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
45
return true;
46
}
47
48
- return fold_masks(ctx, op);
49
+ z_mask = sextract64(t1->z_mask, pos, len);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_shift(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
1
Reviewed-by: Yoshinori Sato <ysato@users.sourceforge.jp>
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
---
5
target/rx/translate.c | 27 ++++++++++++++-------------
6
tcg/optimize.c | 27 ++++++++++++++-------------
6
1 file changed, 14 insertions(+), 13 deletions(-)
7
1 file changed, 14 insertions(+), 13 deletions(-)
7
8
8
diff --git a/target/rx/translate.c b/target/rx/translate.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/target/rx/translate.c
11
--- a/tcg/optimize.c
11
+++ b/target/rx/translate.c
12
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
13
#include "cpu.h"
14
static bool fold_shift(OptContext *ctx, TCGOp *op)
14
#include "exec/exec-all.h"
15
#include "tcg/tcg-op.h"
16
-#include "exec/cpu_ldst.h"
17
#include "exec/helper-proto.h"
18
#include "exec/helper-gen.h"
19
#include "exec/translator.h"
20
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 cpu_acc;
21
22
/* decoder helper */
23
static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
24
- int i, int n)
25
+ int i, int n)
26
{
15
{
27
while (++i <= n) {
16
uint64_t s_mask, z_mask, sign;
28
- uint8_t b = cpu_ldub_code(ctx->env, ctx->base.pc_next++);
17
+ TempOptInfo *t1, *t2;
29
+ uint8_t b = translator_ldub(ctx->env, &ctx->base, ctx->base.pc_next++);
18
30
insn |= b << (32 - i * 8);
19
if (fold_const2(ctx, op) ||
20
fold_ix_to_i(ctx, op, 0) ||
21
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
22
return true;
31
}
23
}
32
return insn;
24
33
@@ -XXX,XX +XXX,XX @@ static uint32_t li(DisasContext *ctx, int sz)
25
- s_mask = arg_info(op->args[1])->s_mask;
34
CPURXState *env = ctx->env;
26
- z_mask = arg_info(op->args[1])->z_mask;
35
addr = ctx->base.pc_next;
27
+ t1 = arg_info(op->args[1]);
36
28
+ t2 = arg_info(op->args[2]);
37
- tcg_debug_assert(sz < 4);
29
+ s_mask = t1->s_mask;
38
switch (sz) {
30
+ z_mask = t1->z_mask;
39
case 1:
31
40
ctx->base.pc_next += 1;
32
- if (arg_is_const(op->args[2])) {
41
- return cpu_ldsb_code(env, addr);
33
- int sh = arg_info(op->args[2])->val;
42
+ return (int8_t)translator_ldub(env, &ctx->base, addr);
34
-
43
case 2:
35
- ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
44
ctx->base.pc_next += 2;
36
+ if (ti_is_const(t2)) {
45
- return cpu_ldsw_code(env, addr);
37
+ int sh = ti_const_val(t2);
46
+ return (int16_t)translator_lduw(env, &ctx->base, addr);
38
47
case 3:
39
+ z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
48
ctx->base.pc_next += 3;
40
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
49
- tmp = cpu_ldsb_code(env, addr + 2) << 16;
41
50
- tmp |= cpu_lduw_code(env, addr) & 0xffff;
42
- return fold_masks(ctx, op);
51
+ tmp = (int8_t)translator_ldub(env, &ctx->base, addr + 2);
43
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
52
+ tmp <<= 16;
53
+ tmp |= translator_lduw(env, &ctx->base, addr);
54
return tmp;
55
case 0:
56
ctx->base.pc_next += 4;
57
- return cpu_ldl_code(env, addr);
58
+ return translator_ldl(env, &ctx->base, addr);
59
+ default:
60
+ g_assert_not_reached();
61
}
44
}
62
return 0;
45
46
switch (op->opc) {
47
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
48
* Arithmetic right shift will not reduce the number of
49
* input sign repetitions.
50
*/
51
- ctx->s_mask = s_mask;
52
- break;
53
+ return fold_masks_s(ctx, op, s_mask);
54
CASE_OP_32_64(shr):
55
/*
56
* If the sign bit is known zero, then logical right shift
57
- * will not reduced the number of input sign repetitions.
58
+ * will not reduce the number of input sign repetitions.
59
*/
60
- sign = (s_mask & -s_mask) >> 1;
61
+ sign = -s_mask;
62
if (sign && !(z_mask & sign)) {
63
- ctx->s_mask = s_mask;
64
+ return fold_masks_s(ctx, op, s_mask);
65
}
66
break;
67
default:
68
break;
69
}
70
71
- return false;
72
+ return finish_folding(ctx, op);
63
}
73
}
64
@@ -XXX,XX +XXX,XX @@ static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem,
74
65
{
75
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
66
uint32_t dsp;
67
68
- tcg_debug_assert(ld < 3);
69
switch (ld) {
70
case 0:
71
return cpu_regs[reg];
72
case 1:
73
- dsp = cpu_ldub_code(ctx->env, ctx->base.pc_next) << size;
74
+ dsp = translator_ldub(ctx->env, &ctx->base, ctx->base.pc_next) << size;
75
tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
76
ctx->base.pc_next += 1;
77
return mem;
78
case 2:
79
- dsp = cpu_lduw_code(ctx->env, ctx->base.pc_next) << size;
80
+ dsp = translator_lduw(ctx->env, &ctx->base, ctx->base.pc_next) << size;
81
tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
82
ctx->base.pc_next += 2;
83
return mem;
84
+ default:
85
+ g_assert_not_reached();
86
}
87
- return NULL;
88
}
89
90
static inline MemOp mi_to_mop(unsigned mi)
91
--
76
--
92
2.34.1
77
2.43.0
93
94
New patch
1
Merge the two conditions, sign != 0 && !(z_mask & sign),
2
by testing ~z_mask & sign. If sign == 0, the logical and
3
will produce false.
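A quick way to convince oneself: elsewhere in this series s_mask is either 0 or of the form ~0 << k, so -s_mask is either 0 or a single bit, and the two tests then agree. A standalone check, not QEMU code:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool old_test(uint64_t z_mask, uint64_t s_mask)
    {
        uint64_t sign = -s_mask;
        return sign != 0 && !(z_mask & sign);
    }

    static bool new_test(uint64_t z_mask, uint64_t s_mask)
    {
        return (~z_mask & -s_mask) != 0;
    }

    int main(void)
    {
        uint64_t z_samples[] = { 0, ~(uint64_t)0, 0x123456789abcdef0ull };

        for (int k = 0; k <= 64; k++) {
            /* k == 64 models s_mask == 0, i.e. nothing known about the sign. */
            uint64_t s_mask = k == 64 ? 0 : ~(uint64_t)0 << k;
            for (size_t i = 0; i < sizeof(z_samples) / sizeof(z_samples[0]); i++) {
                assert(old_test(z_samples[i], s_mask) ==
                       new_test(z_samples[i], s_mask));
            }
        }
        puts("old and new tests agree");
        return 0;
    }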
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 5 ++---
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
16
17
static bool fold_shift(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t s_mask, z_mask, sign;
20
+ uint64_t s_mask, z_mask;
21
TempOptInfo *t1, *t2;
22
23
if (fold_const2(ctx, op) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
25
* If the sign bit is known zero, then logical right shift
26
* will not reduce the number of input sign repetitions.
27
*/
28
- sign = -s_mask;
29
- if (sign && !(z_mask & sign)) {
30
+ if (~z_mask & -s_mask) {
31
return fold_masks_s(ctx, op, s_mask);
32
}
33
break;
34
--
35
2.43.0
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Duplicate fold_sub_vec into fold_sub instead of calling it,
2
now that fold_sub_vec always returns true.
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
target/s390x/tcg/translate.c | 5 +++--
7
tcg/optimize.c | 9 ++++++---
5
1 file changed, 3 insertions(+), 2 deletions(-)
8
1 file changed, 6 insertions(+), 3 deletions(-)
6
9
7
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/target/s390x/tcg/translate.c
12
--- a/tcg/optimize.c
10
+++ b/target/s390x/tcg/translate.c
13
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool s390x_tr_disas_log(const DisasContextBase *dcbase,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
12
DisasContext *dc = container_of(dcbase, DisasContext, base);
15
fold_sub_to_neg(ctx, op)) {
13
14
if (unlikely(dc->ex_value)) {
15
- /* ??? Unfortunately target_disas can't use host memory. */
16
- fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
17
+ /* The ex_value has been recorded with translator_fake_ld. */
18
+ fprintf(logfile, "IN: EXECUTE\n");
19
+ target_disas(logfile, cs, &dc->base);
20
return true;
16
return true;
21
}
17
}
22
return false;
18
- return false;
19
+ return finish_folding(ctx, op);
20
}
21
22
static bool fold_sub(OptContext *ctx, TCGOp *op)
23
{
24
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
25
+ if (fold_const2(ctx, op) ||
26
+ fold_xx_to_i(ctx, op, 0) ||
27
+ fold_xi_to_x(ctx, op, 0) ||
28
+ fold_sub_to_neg(ctx, op)) {
29
return true;
30
}
31
32
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
33
? INDEX_op_add_i32 : INDEX_op_add_i64);
34
op->args[2] = arg_new_constant(ctx, -val);
35
}
36
- return false;
37
+ return finish_folding(ctx, op);
38
}
39
40
static bool fold_sub2(OptContext *ctx, TCGOp *op)
23
--
41
--
24
2.34.1
42
2.43.0
25
26
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 16 +++++++++-------
7
1 file changed, 9 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask = -1, s_mask = 0;
18
+
19
/* We can't do any folding with a load, but we can record bits. */
20
switch (op->opc) {
21
CASE_OP_32_64(ld8s):
22
- ctx->s_mask = MAKE_64BIT_MASK(8, 56);
23
+ s_mask = INT8_MIN;
24
break;
25
CASE_OP_32_64(ld8u):
26
- ctx->z_mask = MAKE_64BIT_MASK(0, 8);
27
+ z_mask = MAKE_64BIT_MASK(0, 8);
28
break;
29
CASE_OP_32_64(ld16s):
30
- ctx->s_mask = MAKE_64BIT_MASK(16, 48);
31
+ s_mask = INT16_MIN;
32
break;
33
CASE_OP_32_64(ld16u):
34
- ctx->z_mask = MAKE_64BIT_MASK(0, 16);
35
+ z_mask = MAKE_64BIT_MASK(0, 16);
36
break;
37
case INDEX_op_ld32s_i64:
38
- ctx->s_mask = MAKE_64BIT_MASK(32, 32);
39
+ s_mask = INT32_MIN;
40
break;
41
case INDEX_op_ld32u_i64:
42
- ctx->z_mask = MAKE_64BIT_MASK(0, 32);
43
+ z_mask = MAKE_64BIT_MASK(0, 32);
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
51
52
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
3
---
4
include/tcg/tcg.h | 1 +
4
tcg/optimize.c | 2 +-
5
accel/tcg/plugin-gen.c | 1 +
5
1 file changed, 1 insertion(+), 1 deletion(-)
6
2 files changed, 2 insertions(+)
7
6
8
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
10
--- a/include/tcg/tcg.h
9
--- a/tcg/optimize.c
11
+++ b/include/tcg/tcg.h
10
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
11
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
13
* space for instructions (for variable-instruction-length ISAs).
12
TCGType type;
14
*/
13
15
struct qemu_plugin_tb *plugin_tb;
14
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
16
+ const struct DisasContextBase *plugin_db;
15
- return false;
17
16
+ return finish_folding(ctx, op);
18
/* descriptor of the instruction being translated */
19
struct qemu_plugin_insn *plugin_insn;
20
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/accel/tcg/plugin-gen.c
23
+++ b/accel/tcg/plugin-gen.c
24
@@ -XXX,XX +XXX,XX @@ bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
25
tcg_gen_plugin_cb(PLUGIN_GEN_FROM_TB);
26
}
17
}
27
18
28
+ tcg_ctx->plugin_db = db;
19
type = ctx->type;
29
tcg_ctx->plugin_insn = NULL;
30
31
return ret;
32
--
20
--
33
2.34.1
21
2.43.0
34
35
1
We don't need to allocate the plugin context at startup;
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
we can wait until we actually use it.
2
Remove fold_masks as the function becomes unused.
3
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
accel/tcg/plugin-gen.c | 36 ++++++++++++++++++++----------------
7
tcg/optimize.c | 18 ++++++++----------
8
tcg/tcg.c | 11 -----------
8
1 file changed, 8 insertions(+), 10 deletions(-)
9
2 files changed, 20 insertions(+), 27 deletions(-)
10
9
11
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/plugin-gen.c
12
--- a/tcg/optimize.c
14
+++ b/accel/tcg/plugin-gen.c
13
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
16
15
return fold_masks_zs(ctx, op, -1, s_mask);
17
bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db)
18
{
19
- bool ret = false;
20
+ struct qemu_plugin_tb *ptb;
21
22
- if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_state->event_mask)) {
23
- struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
24
-
25
- /* reset callbacks */
26
- if (ptb->cbs) {
27
- g_array_set_size(ptb->cbs, 0);
28
- }
29
- ptb->n = 0;
30
-
31
- ret = true;
32
-
33
- ptb->mem_helper = false;
34
-
35
- tcg_gen_plugin_cb(PLUGIN_GEN_FROM_TB);
36
+ if (!test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS,
37
+ cpu->plugin_state->event_mask)) {
38
+ return false;
39
}
40
41
tcg_ctx->plugin_db = db;
42
tcg_ctx->plugin_insn = NULL;
43
+ ptb = tcg_ctx->plugin_tb;
44
45
- return ret;
46
+ if (ptb) {
47
+ /* Reset callbacks */
48
+ if (ptb->cbs) {
49
+ g_array_set_size(ptb->cbs, 0);
50
+ }
51
+ ptb->n = 0;
52
+ ptb->mem_helper = false;
53
+ } else {
54
+ ptb = g_new0(struct qemu_plugin_tb, 1);
55
+ tcg_ctx->plugin_tb = ptb;
56
+ ptb->insns = g_ptr_array_new();
57
+ }
58
+
59
+ tcg_gen_plugin_cb(PLUGIN_GEN_FROM_TB);
60
+ return true;
61
}
16
}
62
17
63
void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
64
diff --git a/tcg/tcg.c b/tcg/tcg.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/tcg/tcg.c
67
+++ b/tcg/tcg.c
68
@@ -XXX,XX +XXX,XX @@ QEMU_BUILD_BUG_ON((int)(offsetof(CPUNegativeOffsetState, tlb.f[0]) -
69
< MIN_TLB_MASK_TABLE_OFS);
70
#endif
71
72
-static void alloc_tcg_plugin_context(TCGContext *s)
73
-{
19
-{
74
-#ifdef CONFIG_PLUGIN
20
- return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
75
- s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
76
- s->plugin_tb->insns = g_ptr_array_new();
77
-#endif
78
-}
21
-}
79
-
22
-
80
/*
23
/*
81
* All TCG threads except the parent (i.e. the one that called tcg_context_init
24
* An "affected" mask bit is 0 if and only if the result is identical
82
* and registered the target's TCG globals) must register with this function
25
* to the first input. Thus if the entire mask is 0, the operation
83
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void)
26
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
84
qatomic_set(&tcg_ctxs[n], s);
27
85
28
static bool fold_xor(OptContext *ctx, TCGOp *op)
86
if (n > 0) {
29
{
87
- alloc_tcg_plugin_context(s);
30
+ uint64_t z_mask, s_mask;
88
tcg_region_initial_alloc(s);
31
+ TempOptInfo *t1, *t2;
32
+
33
if (fold_const2_commutative(ctx, op) ||
34
fold_xx_to_i(ctx, op, 0) ||
35
fold_xi_to_x(ctx, op, 0) ||
36
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
37
return true;
89
}
38
}
90
39
91
@@ -XXX,XX +XXX,XX @@ static void tcg_context_init(unsigned max_cpus)
40
- ctx->z_mask = arg_info(op->args[1])->z_mask
92
indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
41
- | arg_info(op->args[2])->z_mask;
93
}
42
- ctx->s_mask = arg_info(op->args[1])->s_mask
94
43
- & arg_info(op->args[2])->s_mask;
95
- alloc_tcg_plugin_context(s);
44
- return fold_masks(ctx, op);
96
-
45
+ t1 = arg_info(op->args[1]);
97
tcg_ctx = s;
46
+ t2 = arg_info(op->args[2]);
98
/*
47
+ z_mask = t1->z_mask | t2->z_mask;
99
* In user-mode we simply share the init context among threads, since we
48
+ s_mask = t1->s_mask & t2->s_mask;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
51
52
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
100
--
53
--
101
2.34.1
54
2.43.0
102
103
1
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
3
---
5
target/riscv/translate.c | 6 +++---
4
tcg/optimize.c | 2 +-
6
1 file changed, 3 insertions(+), 3 deletions(-)
5
1 file changed, 1 insertion(+), 1 deletion(-)
7
6
8
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
10
--- a/target/riscv/translate.c
9
--- a/tcg/optimize.c
11
+++ b/target/riscv/translate.c
10
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@
11
@@ -XXX,XX +XXX,XX @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
13
#include "qemu/log.h"
12
return fold_orc(ctx, op);
14
#include "cpu.h"
13
}
15
#include "tcg/tcg-op.h"
14
}
16
-#include "exec/cpu_ldst.h"
15
- return false;
17
#include "exec/exec-all.h"
16
+ return finish_folding(ctx, op);
18
#include "exec/helper-proto.h"
19
#include "exec/helper-gen.h"
20
@@ -XXX,XX +XXX,XX @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
21
CPUState *cpu = ctx->cs;
22
CPURISCVState *env = cpu_env(cpu);
23
24
- return cpu_ldl_code(env, pc);
25
+ return translator_ldl(env, &ctx->base, pc);
26
}
17
}
27
18
28
/* Include insn module translation function */
19
/* Propagate constants and copies, fold constant expressions. */
29
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
30
unsigned page_ofs = ctx->base.pc_next & ~TARGET_PAGE_MASK;
31
32
if (page_ofs > TARGET_PAGE_SIZE - MAX_INSN_LEN) {
33
- uint16_t next_insn = cpu_lduw_code(env, ctx->base.pc_next);
34
+ uint16_t next_insn =
35
+ translator_lduw(env, &ctx->base, ctx->base.pc_next);
36
int len = insn_len(next_insn);
37
38
if (!is_same_page(&ctx->base, ctx->base.pc_next + len - 1)) {
39
--
20
--
40
2.34.1
21
2.43.0
41
42
New patch
1
All non-default cases now finish folding within each function.
2
Do the same with the default case and assert it is done after.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 6 ++----
8
1 file changed, 2 insertions(+), 4 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
15
done = true;
16
break;
17
default:
18
+ done = finish_folding(&ctx, op);
19
break;
20
}
21
-
22
- if (!done) {
23
- finish_folding(&ctx, op);
24
- }
25
+ tcg_debug_assert(done);
26
}
27
}
28
--
29
2.43.0
New patch
1
All mask setting is now done with parameters via fold_masks_*.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 -------------
7
1 file changed, 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
14
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
15
16
/* In flight values from optimization. */
17
- uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
18
- uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
19
TCGType type;
20
} OptContext;
21
22
@@ -XXX,XX +XXX,XX @@ static bool finish_folding(OptContext *ctx, TCGOp *op)
23
for (i = 0; i < nb_oargs; i++) {
24
TCGTemp *ts = arg_temp(op->args[i]);
25
reset_ts(ctx, ts);
26
- /*
27
- * Save the corresponding known-zero/sign bits mask for the
28
- * first output argument (only one supported so far).
29
- */
30
- if (i == 0) {
31
- ts_info(ts)->z_mask = ctx->z_mask;
32
- }
33
}
34
return true;
35
}
36
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
37
ctx.type = TCG_TYPE_I32;
38
}
39
40
- /* Assume all bits affected, no bits known zero, no sign reps. */
41
- ctx.z_mask = -1;
42
- ctx.s_mask = 0;
43
-
44
/*
45
* Process each opcode.
46
* Sorted alphabetically by opcode as much as possible.
47
--
48
2.43.0
1
The ilen value extracted from ex_value is the length of the
1
All instances of s_mask have been converted to the new
2
EXECUTE instruction itself, and so is the increment to the pc.
2
representation. We can now re-enable usage.
3
However, the length of the synthetic insn is located in the
4
opcode like all other instructions.
5
3
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
6
---
9
target/s390x/tcg/translate.c | 4 ++--
7
tcg/optimize.c | 4 ++--
10
1 file changed, 2 insertions(+), 2 deletions(-)
8
1 file changed, 2 insertions(+), 2 deletions(-)
11
9
12
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
14
--- a/target/s390x/tcg/translate.c
12
--- a/tcg/optimize.c
15
+++ b/target/s390x/tcg/translate.c
13
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
17
/* Extract the values saved by EXECUTE. */
15
g_assert_not_reached();
18
insn = s->ex_value & 0xffffffffffff0000ull;
16
}
19
ilen = s->ex_value & 0xf;
17
20
+ op = insn >> 56;
18
- if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
21
19
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
22
/* Register insn bytes with translator so plugins work. */
20
return true;
23
be_insn = cpu_to_be64(insn);
21
}
24
- translator_fake_ld(&s->base, &be_insn, ilen);
22
25
- op = insn >> 56;
23
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
26
+ translator_fake_ld(&s->base, &be_insn, get_ilen(op));
24
s_mask = s_mask_old >> pos;
27
} else {
25
s_mask |= -1ull << (len - 1);
28
insn = ld_code2(env, s, pc);
26
29
op = (insn >> 8) & 0xff;
27
- if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
28
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
29
return true;
30
}
31
30
--
32
--
31
2.34.1
33
2.43.0
32
33
1
This will be able to replace plugin_insn_append, and will
1
The big comment just above says functions should be sorted.
2
be usable for disassembly.
2
Add forward declarations as needed.
3
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
include/exec/translator.h | 12 ++++++++++++
7
tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
8
accel/tcg/translator.c | 41 +++++++++++++++++++++++++++++++++++++++
8
1 file changed, 59 insertions(+), 55 deletions(-)
9
2 files changed, 53 insertions(+)
10
9
11
diff --git a/include/exec/translator.h b/include/exec/translator.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/include/exec/translator.h
12
--- a/tcg/optimize.c
14
+++ b/include/exec/translator.h
13
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContextBase {
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
16
bool plugin_enabled;
15
* 3) those that produce information about the result value.
17
struct TCGOp *insn_start;
16
*/
18
void *host_addr[2];
17
18
+static bool fold_or(OptContext *ctx, TCGOp *op);
19
+static bool fold_orc(OptContext *ctx, TCGOp *op);
20
+static bool fold_xor(OptContext *ctx, TCGOp *op);
19
+
21
+
20
+ /*
22
static bool fold_add(OptContext *ctx, TCGOp *op)
21
+ * Record insn data that we cannot read directly from host memory.
23
{
22
+ * There are only two reasons we cannot use host memory:
24
if (fold_const2_commutative(ctx, op) ||
23
+ * (1) We are executing from I/O,
25
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
24
+ * (2) We are executing a synthetic instruction (s390x EX).
26
return fold_masks_zs(ctx, op, z_mask, s_mask);
25
+ * In both cases we need record exactly one instruction,
26
+ * and thus the maximum amount of data we record is limited.
27
+ */
28
+ int record_start;
29
+ int record_len;
30
+ uint8_t record[32];
31
} DisasContextBase;
32
33
/**
34
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/accel/tcg/translator.c
37
+++ b/accel/tcg/translator.c
38
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
39
db->insn_start = NULL;
40
db->host_addr[0] = host_pc;
41
db->host_addr[1] = NULL;
42
+ db->record_start = 0;
43
+ db->record_len = 0;
44
45
ops->init_disas_context(db, cpu);
46
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
47
@@ -XXX,XX +XXX,XX @@ static bool translator_ld(CPUArchState *env, DisasContextBase *db,
48
return true;
49
}
27
}
50
28
51
+static void record_save(DisasContextBase *db, vaddr pc,
29
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
52
+ const void *from, int size)
53
+{
30
+{
54
+ int offset;
31
+ /* If true and false values are the same, eliminate the cmp. */
55
+
32
+ if (args_are_copies(op->args[2], op->args[3])) {
56
+ /* Do not record probes before the start of TB. */
33
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
57
+ if (pc < db->pc_first) {
58
+ return;
59
+ }
34
+ }
60
+
35
+
61
+ /*
36
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
62
+ * In translator_access, we verified that pc is within 2 pages
37
+ uint64_t tv = arg_info(op->args[2])->val;
63
+ * of pc_first, thus this will never overflow.
38
+ uint64_t fv = arg_info(op->args[3])->val;
64
+ */
65
+ offset = pc - db->pc_first;
66
+
39
+
67
+ /*
40
+ if (tv == -1 && fv == 0) {
68
+ * Either the first or second page may be I/O. If it is the second,
41
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
69
+ * then the first byte we need to record will be at a non-zero offset.
42
+ }
70
+ * In either case, we should not need to record but a single insn.
43
+ if (tv == 0 && fv == -1) {
71
+ */
44
+ if (TCG_TARGET_HAS_not_vec) {
72
+ if (db->record_len == 0) {
45
+ op->opc = INDEX_op_not_vec;
73
+ db->record_start = offset;
46
+ return fold_not(ctx, op);
74
+ db->record_len = size;
47
+ } else {
75
+ } else {
48
+ op->opc = INDEX_op_xor_vec;
76
+ assert(offset == db->record_start + db->record_len);
49
+ op->args[2] = arg_new_constant(ctx, -1);
77
+ assert(db->record_len + size <= sizeof(db->record));
50
+ return fold_xor(ctx, op);
78
+ db->record_len += size;
51
+ }
52
+ }
79
+ }
53
+ }
80
+
54
+ if (arg_is_const(op->args[2])) {
81
+ memcpy(db->record + (offset - db->record_start), from, size);
55
+ uint64_t tv = arg_info(op->args[2])->val;
56
+ if (tv == -1) {
57
+ op->opc = INDEX_op_or_vec;
58
+ op->args[2] = op->args[3];
59
+ return fold_or(ctx, op);
60
+ }
61
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
62
+ op->opc = INDEX_op_andc_vec;
63
+ op->args[2] = op->args[1];
64
+ op->args[1] = op->args[3];
65
+ return fold_andc(ctx, op);
66
+ }
67
+ }
68
+ if (arg_is_const(op->args[3])) {
69
+ uint64_t fv = arg_info(op->args[3])->val;
70
+ if (fv == 0) {
71
+ op->opc = INDEX_op_and_vec;
72
+ return fold_and(ctx, op);
73
+ }
74
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
75
+ op->opc = INDEX_op_orc_vec;
76
+ op->args[2] = op->args[1];
77
+ op->args[1] = op->args[3];
78
+ return fold_orc(ctx, op);
79
+ }
80
+ }
81
+ return finish_folding(ctx, op);
82
+}
82
+}
83
+
83
+
84
static void plugin_insn_append(vaddr pc, const void *from, size_t size)
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
85
{
85
{
86
#ifdef CONFIG_PLUGIN
86
int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
87
@@ -XXX,XX +XXX,XX @@ uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc)
87
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
88
88
return fold_masks_zs(ctx, op, z_mask, s_mask);
89
if (!translator_ld(env, db, &raw, pc, sizeof(raw))) {
89
}
90
raw = cpu_ldub_code(env, pc);
90
91
+ record_save(db, pc, &raw, sizeof(raw));
91
-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
92
}
92
-{
93
plugin_insn_append(pc, &raw, sizeof(raw));
93
- /* If true and false values are the same, eliminate the cmp. */
94
return raw;
94
- if (args_are_copies(op->args[2], op->args[3])) {
95
@@ -XXX,XX +XXX,XX @@ uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, vaddr pc)
95
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
96
} else {
96
- }
97
tgt = cpu_lduw_code(env, pc);
97
-
98
raw = tswap16(tgt);
98
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
99
+ record_save(db, pc, &raw, sizeof(raw));
99
- uint64_t tv = arg_info(op->args[2])->val;
100
}
100
- uint64_t fv = arg_info(op->args[3])->val;
101
plugin_insn_append(pc, &raw, sizeof(raw));
101
-
102
return tgt;
102
- if (tv == -1 && fv == 0) {
103
@@ -XXX,XX +XXX,XX @@ uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, vaddr pc)
103
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
104
} else {
104
- }
105
tgt = cpu_ldl_code(env, pc);
105
- if (tv == 0 && fv == -1) {
106
raw = tswap32(tgt);
106
- if (TCG_TARGET_HAS_not_vec) {
107
+ record_save(db, pc, &raw, sizeof(raw));
107
- op->opc = INDEX_op_not_vec;
108
}
108
- return fold_not(ctx, op);
109
plugin_insn_append(pc, &raw, sizeof(raw));
109
- } else {
110
return tgt;
110
- op->opc = INDEX_op_xor_vec;
111
@@ -XXX,XX +XXX,XX @@ uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc)
111
- op->args[2] = arg_new_constant(ctx, -1);
112
} else {
112
- return fold_xor(ctx, op);
113
tgt = cpu_ldq_code(env, pc);
113
- }
114
raw = tswap64(tgt);
114
- }
115
+ record_save(db, pc, &raw, sizeof(raw));
115
- }
116
}
116
- if (arg_is_const(op->args[2])) {
117
plugin_insn_append(pc, &raw, sizeof(raw));
117
- uint64_t tv = arg_info(op->args[2])->val;
118
return tgt;
118
- if (tv == -1) {
119
@@ -XXX,XX +XXX,XX @@ uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc)
119
- op->opc = INDEX_op_or_vec;
120
120
- op->args[2] = op->args[3];
121
void translator_fake_ldb(DisasContextBase *db, vaddr pc, uint8_t insn8)
121
- return fold_or(ctx, op);
122
- }
123
- if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
124
- op->opc = INDEX_op_andc_vec;
125
- op->args[2] = op->args[1];
126
- op->args[1] = op->args[3];
127
- return fold_andc(ctx, op);
128
- }
129
- }
130
- if (arg_is_const(op->args[3])) {
131
- uint64_t fv = arg_info(op->args[3])->val;
132
- if (fv == 0) {
133
- op->opc = INDEX_op_and_vec;
134
- return fold_and(ctx, op);
135
- }
136
- if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
137
- op->opc = INDEX_op_orc_vec;
138
- op->args[2] = op->args[1];
139
- op->args[1] = op->args[3];
140
- return fold_orc(ctx, op);
141
- }
142
- }
143
- return finish_folding(ctx, op);
144
-}
145
-
146
/* Propagate constants and copies, fold constant expressions. */
147
void tcg_optimize(TCGContext *s)
122
{
148
{
123
+ assert(pc >= db->pc_first);
124
+ record_save(db, pc, &insn8, sizeof(insn8));
125
plugin_insn_append(pc, &insn8, sizeof(insn8));
126
}
127
--
149
--
128
2.34.1
150
2.43.0
129
130
1
Copy data out of a completed translation. This will be used
1
The big comment just above says functions should be sorted.
2
for both plugins and disassembly.
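A rough usage sketch (illustrative; buf, insn_vaddr and insn_len are placeholder names, the interface is the one declared below):

    uint8_t buf[16];

    if (translator_st(db, buf, insn_vaddr, insn_len)) {
        /* buf now holds insn_len bytes that were read during translation,
         * whether they came from guest RAM or from the recorded bytes */
    }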
3
2
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
include/exec/translator.h | 23 ++++++++++++++++
6
tcg/optimize.c | 60 +++++++++++++++++++++++++-------------------------
8
accel/tcg/translator.c | 55 +++++++++++++++++++++++++++++++++++++++
7
1 file changed, 30 insertions(+), 30 deletions(-)
9
2 files changed, 78 insertions(+)
10
8
11
diff --git a/include/exec/translator.h b/include/exec/translator.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
13
--- a/include/exec/translator.h
11
--- a/tcg/optimize.c
14
+++ b/include/exec/translator.h
12
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
13
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
16
*/
14
return true;
17
void translator_fake_ldb(DisasContextBase *db, vaddr pc, uint8_t insn8);
18
19
+/**
20
+ * translator_st
21
+ * @db: disassembly context
22
+ * @dest: address to copy into
23
+ * @addr: virtual address within TB
24
+ * @len: length
25
+ *
26
+ * Copy @len bytes from @addr into @dest.
27
+ * All bytes must have been read during translation.
28
+ * Return true on success or false on failure.
29
+ */
30
+bool translator_st(const DisasContextBase *db, void *dest,
31
+ vaddr addr, size_t len);
32
+
33
+/**
34
+ * translator_st_len
35
+ * @db: disassembly context
36
+ *
37
+ * Return the number of bytes available to copy from the
38
+ * current translation block with translator_st.
39
+ */
40
+size_t translator_st_len(const DisasContextBase *db);
41
+
42
#ifdef COMPILING_PER_TARGET
43
/*
44
* Return whether addr is on the same page as where disassembly started.
45
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/accel/tcg/translator.c
48
+++ b/accel/tcg/translator.c
49
@@ -XXX,XX +XXX,XX @@ static void record_save(DisasContextBase *db, vaddr pc,
50
memcpy(db->record + (offset - db->record_start), from, size);
51
}
15
}
52
16
53
+size_t translator_st_len(const DisasContextBase *db)
17
+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
54
+{
18
+{
55
+ return db->fake_insn ? db->record_len : db->tb->size;
19
+ /* Canonicalize the comparison to put immediate second. */
20
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
21
+ op->args[3] = tcg_swap_cond(op->args[3]);
22
+ }
23
+ return finish_folding(ctx, op);
56
+}
24
+}
57
+
25
+
58
+bool translator_st(const DisasContextBase *db, void *dest,
26
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
59
+ vaddr addr, size_t len)
60
+{
27
+{
61
+ size_t offset, offset_end;
28
+ /* If true and false values are the same, eliminate the cmp. */
62
+
29
+ if (args_are_copies(op->args[3], op->args[4])) {
63
+ if (addr < db->pc_first) {
30
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
64
+ return false;
65
+ }
66
+ offset = addr - db->pc_first;
67
+ offset_end = offset + len;
68
+ if (offset_end > translator_st_len(db)) {
69
+ return false;
70
+ }
31
+ }
71
+
32
+
72
+ if (!db->fake_insn) {
33
+ /* Canonicalize the comparison to put immediate second. */
73
+ size_t offset_page1 = -(db->pc_first | TARGET_PAGE_MASK);
34
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
74
+
35
+ op->args[5] = tcg_swap_cond(op->args[5]);
75
+ /* Get all the bytes from the first page. */
76
+ if (db->host_addr[0]) {
77
+ if (offset_end <= offset_page1) {
78
+ memcpy(dest, db->host_addr[0] + offset, len);
79
+ return true;
80
+ }
81
+ if (offset < offset_page1) {
82
+ size_t len0 = offset_page1 - offset;
83
+ memcpy(dest, db->host_addr[0] + offset, len0);
84
+ offset += len0;
85
+ dest += len0;
86
+ }
87
+ }
88
+
89
+ /* Get any bytes from the second page. */
90
+ if (db->host_addr[1] && offset >= offset_page1) {
91
+ memcpy(dest, db->host_addr[1] + (offset - offset_page1),
92
+ offset_end - offset);
93
+ return true;
94
+ }
95
+ }
36
+ }
96
+
37
+ /*
97
+ /* Else get recorded bytes. */
38
+ * Canonicalize the "false" input reg to match the destination,
98
+ if (db->record_len != 0 &&
39
+ * so that the tcg backend can implement "move if true".
99
+ offset >= db->record_start &&
40
+ */
100
+ offset_end <= db->record_start + db->record_len) {
41
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
101
+ memcpy(dest, db->record + (offset - db->record_start),
42
+ op->args[5] = tcg_invert_cond(op->args[5]);
102
+ offset_end - offset);
103
+ return true;
104
+ }
43
+ }
105
+ return false;
44
+ return finish_folding(ctx, op);
106
+}
45
+}
107
+
46
+
108
static void plugin_insn_append(vaddr pc, const void *from, size_t size)
47
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
109
{
48
{
110
#ifdef CONFIG_PLUGIN
49
uint64_t z_mask, s_mask;
50
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
51
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
52
}
53
54
-static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
55
-{
56
- /* Canonicalize the comparison to put immediate second. */
57
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
58
- op->args[3] = tcg_swap_cond(op->args[3]);
59
- }
60
- return finish_folding(ctx, op);
61
-}
62
-
63
-static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
64
-{
65
- /* If true and false values are the same, eliminate the cmp. */
66
- if (args_are_copies(op->args[3], op->args[4])) {
67
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
68
- }
69
-
70
- /* Canonicalize the comparison to put immediate second. */
71
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
72
- op->args[5] = tcg_swap_cond(op->args[5]);
73
- }
74
- /*
75
- * Canonicalize the "false" input reg to match the destination,
76
- * so that the tcg backend can implement "move if true".
77
- */
78
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
79
- op->args[5] = tcg_invert_cond(op->args[5]);
80
- }
81
- return finish_folding(ctx, op);
82
-}
83
-
84
static bool fold_sextract(OptContext *ctx, TCGOp *op)
85
{
86
uint64_t z_mask, s_mask, s_mask_old;
111
--
87
--
112
2.34.1
88
2.43.0
113
114
1
Do not allow translation to proceed beyond one insn with mmio,
1
We currently have a flag, float_muladd_halve_result, to scale
2
as we will not be caching the TranslationBlock.
2
the result by 2**-1. Extend this to handle arbitrary scaling.
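As a rough sketch of the new interface (illustrative only; the wrapper name is made up here), the extra argument scales the fused result by 2**scale before the single rounding, so the old halving behaviour becomes scale = -1:

    static float64 fused_madd_halve(float64 a, float64 b, float64 c,
                                    float_status *st)
    {
        /* (a * b + c) * 2**-1 with a single rounding */
        return float64_muladd_scalbn(a, b, c, -1, 0, st);
    }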
3
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
accel/tcg/translator.c | 4 ++++
7
include/fpu/softfloat.h | 6 ++++
8
1 file changed, 4 insertions(+)
8
fpu/softfloat.c | 58 ++++++++++++++++++++++-----------------
9
9
fpu/softfloat-parts.c.inc | 7 +++--
10
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
10
3 files changed, 44 insertions(+), 27 deletions(-)
11
12
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
11
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
12
--- a/accel/tcg/translator.c
14
--- a/include/fpu/softfloat.h
13
+++ b/accel/tcg/translator.c
15
+++ b/include/fpu/softfloat.h
14
@@ -XXX,XX +XXX,XX @@ static bool translator_ld(CPUArchState *env, DisasContextBase *db,
16
@@ -XXX,XX +XXX,XX @@ float16 float16_add(float16, float16, float_status *status);
15
17
float16 float16_sub(float16, float16, float_status *status);
16
/* Use slow path if first page is MMIO. */
18
float16 float16_mul(float16, float16, float_status *status);
17
if (unlikely(tb_page_addr0(tb) == -1)) {
19
float16 float16_muladd(float16, float16, float16, int, float_status *status);
18
+ /* We capped translation with first page MMIO in tb_gen_code. */
20
+float16 float16_muladd_scalbn(float16, float16, float16,
19
+ tcg_debug_assert(db->max_insns == 1);
21
+ int, int, float_status *status);
20
return false;
22
float16 float16_div(float16, float16, float_status *status);
23
float16 float16_scalbn(float16, int, float_status *status);
24
float16 float16_min(float16, float16, float_status *status);
25
@@ -XXX,XX +XXX,XX @@ float32 float32_mul(float32, float32, float_status *status);
26
float32 float32_div(float32, float32, float_status *status);
27
float32 float32_rem(float32, float32, float_status *status);
28
float32 float32_muladd(float32, float32, float32, int, float_status *status);
29
+float32 float32_muladd_scalbn(float32, float32, float32,
30
+ int, int, float_status *status);
31
float32 float32_sqrt(float32, float_status *status);
32
float32 float32_exp2(float32, float_status *status);
33
float32 float32_log2(float32, float_status *status);
34
@@ -XXX,XX +XXX,XX @@ float64 float64_mul(float64, float64, float_status *status);
35
float64 float64_div(float64, float64, float_status *status);
36
float64 float64_rem(float64, float64, float_status *status);
37
float64 float64_muladd(float64, float64, float64, int, float_status *status);
38
+float64 float64_muladd_scalbn(float64, float64, float64,
39
+ int, int, float_status *status);
40
float64 float64_sqrt(float64, float_status *status);
41
float64 float64_log2(float64, float_status *status);
42
FloatRelation float64_compare(float64, float64, float_status *status);
43
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/fpu/softfloat.c
46
+++ b/fpu/softfloat.c
47
@@ -XXX,XX +XXX,XX @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
48
#define parts_mul(A, B, S) \
49
PARTS_GENERIC_64_128(mul, A)(A, B, S)
50
51
-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
52
- FloatParts64 *c, int flags,
53
- float_status *s);
54
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
55
- FloatParts128 *c, int flags,
56
- float_status *s);
57
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
58
+ FloatParts64 *c, int scale,
59
+ int flags, float_status *s);
60
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
61
+ FloatParts128 *c, int scale,
62
+ int flags, float_status *s);
63
64
-#define parts_muladd(A, B, C, Z, S) \
65
- PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
66
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
67
+ PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)
68
69
static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
70
float_status *s);
71
@@ -XXX,XX +XXX,XX @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
72
* Fused multiply-add
73
*/
74
75
-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
76
- int flags, float_status *status)
77
+float16 QEMU_FLATTEN
78
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
79
+ int scale, int flags, float_status *status)
80
{
81
FloatParts64 pa, pb, pc, *pr;
82
83
float16_unpack_canonical(&pa, a, status);
84
float16_unpack_canonical(&pb, b, status);
85
float16_unpack_canonical(&pc, c, status);
86
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
87
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
88
89
return float16_round_pack_canonical(pr, status);
90
}
91
92
-static float32 QEMU_SOFTFLOAT_ATTR
93
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
94
- float_status *status)
95
+float16 float16_muladd(float16 a, float16 b, float16 c,
96
+ int flags, float_status *status)
97
+{
98
+ return float16_muladd_scalbn(a, b, c, 0, flags, status);
99
+}
100
+
101
+float32 QEMU_SOFTFLOAT_ATTR
102
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
103
+ int scale, int flags, float_status *status)
104
{
105
FloatParts64 pa, pb, pc, *pr;
106
107
float32_unpack_canonical(&pa, a, status);
108
float32_unpack_canonical(&pb, b, status);
109
float32_unpack_canonical(&pc, c, status);
110
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
111
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
112
113
return float32_round_pack_canonical(pr, status);
114
}
115
116
-static float64 QEMU_SOFTFLOAT_ATTR
117
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
118
- float_status *status)
119
+float64 QEMU_SOFTFLOAT_ATTR
120
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
121
+ int scale, int flags, float_status *status)
122
{
123
FloatParts64 pa, pb, pc, *pr;
124
125
float64_unpack_canonical(&pa, a, status);
126
float64_unpack_canonical(&pb, b, status);
127
float64_unpack_canonical(&pc, c, status);
128
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
129
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
130
131
return float64_round_pack_canonical(pr, status);
132
}
133
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
134
return ur.s;
135
136
soft:
137
- return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
138
+ return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
139
}
140
141
float64 QEMU_FLATTEN
142
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
143
return ur.s;
144
145
soft:
146
- return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
147
+ return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
148
}
149
150
float64 float64r32_muladd(float64 a, float64 b, float64 c,
151
@@ -XXX,XX +XXX,XX @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
152
float64_unpack_canonical(&pa, a, status);
153
float64_unpack_canonical(&pb, b, status);
154
float64_unpack_canonical(&pc, c, status);
155
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
156
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
157
158
return float64r32_round_pack_canonical(pr, status);
159
}
160
@@ -XXX,XX +XXX,XX @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
161
bfloat16_unpack_canonical(&pa, a, status);
162
bfloat16_unpack_canonical(&pb, b, status);
163
bfloat16_unpack_canonical(&pc, c, status);
164
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
165
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
166
167
return bfloat16_round_pack_canonical(pr, status);
168
}
169
@@ -XXX,XX +XXX,XX @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
170
float128_unpack_canonical(&pa, a, status);
171
float128_unpack_canonical(&pb, b, status);
172
float128_unpack_canonical(&pc, c, status);
173
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
174
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
175
176
return float128_round_pack_canonical(pr, status);
177
}
178
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
179
180
float64_unpack_canonical(&rp, float64_one, status);
181
for (i = 0 ; i < 15 ; i++) {
182
+
183
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
184
- rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
185
+ rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
186
xnp = *parts_mul(&xnp, &xp, status);
21
}
187
}
22
188
23
@@ -XXX,XX +XXX,XX @@ static bool translator_ld(CPUArchState *env, DisasContextBase *db,
189
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
24
if (unlikely(new_page1 == -1)) {
190
index XXXXXXX..XXXXXXX 100644
25
tb_unlock_pages(tb);
191
--- a/fpu/softfloat-parts.c.inc
26
tb_set_page_addr0(tb, -1);
192
+++ b/fpu/softfloat-parts.c.inc
27
+ /* Require that this be the final insn. */
193
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
28
+ db->max_insns = db->num_insns;
194
* Requires A and C extracted into a double-sized structure to provide the
29
return false;
195
* extra space for the widening multiply.
30
}
196
*/
31
197
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
198
- FloatPartsN *c, int flags, float_status *s)
199
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
200
+ FloatPartsN *c, int scale,
201
+ int flags, float_status *s)
202
{
203
int ab_mask, abc_mask;
204
FloatPartsW p_widen, c_widen;
205
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
206
a->exp = p_widen.exp;
207
208
return_normal:
209
+ /* TODO: Replace all use of float_muladd_halve_result with scale. */
210
if (flags & float_muladd_halve_result) {
211
a->exp -= 1;
212
}
213
+ a->exp += scale;
214
finish_sign:
215
if (flags & float_muladd_negate_result) {
216
a->sign ^= 1;
32
--
217
--
33
2.34.1
218
2.43.0
34
219
35
220
1
Do not pass around a boolean between multiple structures,
1
Use the scalbn interface instead of float_muladd_halve_result.
2
just read it from the TranslationBlock in the TCGContext.
3
2
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
include/exec/plugin-gen.h | 7 +++----
6
target/arm/tcg/helper-a64.c | 6 +++---
8
include/qemu/plugin.h | 3 ---
7
1 file changed, 3 insertions(+), 3 deletions(-)
9
accel/tcg/plugin-gen.c | 4 +---
10
accel/tcg/translator.c | 2 +-
11
plugins/api.c | 14 +++++++++-----
12
5 files changed, 14 insertions(+), 16 deletions(-)
13
8
14
diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h
9
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
15
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/plugin-gen.h
11
--- a/target/arm/tcg/helper-a64.c
17
+++ b/include/exec/plugin-gen.h
12
+++ b/target/arm/tcg/helper-a64.c
18
@@ -XXX,XX +XXX,XX @@ struct DisasContextBase;
13
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst)
19
14
(float16_is_infinity(b) && float16_is_zero(a))) {
20
#ifdef CONFIG_PLUGIN
15
return float16_one_point_five;
21
16
}
22
-bool plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db,
17
- return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
23
- bool supress);
18
+ return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);
24
+bool plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db);
25
void plugin_gen_tb_end(CPUState *cpu, size_t num_insns);
26
void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db);
27
void plugin_gen_insn_end(void);
28
@@ -XXX,XX +XXX,XX @@ void plugin_gen_disable_mem_helpers(void);
29
30
#else /* !CONFIG_PLUGIN */
31
32
-static inline bool
33
-plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db, bool sup)
34
+static inline
35
+bool plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db)
36
{
37
return false;
38
}
19
}
39
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
20
40
index XXXXXXX..XXXXXXX 100644
21
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
41
--- a/include/qemu/plugin.h
22
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
42
+++ b/include/qemu/plugin.h
23
(float32_is_infinity(b) && float32_is_zero(a))) {
43
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_insn {
24
return float32_one_point_five;
44
45
/* if set, the instruction calls helpers that might access guest memory */
46
bool mem_helper;
47
-
48
- bool mem_only;
49
};
50
51
/* A scoreboard is an array of values, indexed by vcpu_index */
52
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_tb {
53
uint64_t vaddr2;
54
void *haddr1;
55
void *haddr2;
56
- bool mem_only;
57
58
/* if set, the TB calls helpers that might access guest memory */
59
bool mem_helper;
60
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/accel/tcg/plugin-gen.c
63
+++ b/accel/tcg/plugin-gen.c
64
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
65
}
25
}
26
- return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
27
+ return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst);
66
}
28
}
67
29
68
-bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
30
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
69
- bool mem_only)
31
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
70
+bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db)
32
(float64_is_infinity(b) && float64_is_zero(a))) {
71
{
33
return float64_one_point_five;
72
bool ret = false;
34
}
73
35
- return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
74
@@ -XXX,XX +XXX,XX @@ bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
36
+ return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
75
ptb->vaddr2 = -1;
76
ptb->haddr1 = db->host_addr[0];
77
ptb->haddr2 = NULL;
78
- ptb->mem_only = mem_only;
79
ptb->mem_helper = false;
80
81
tcg_gen_plugin_cb(PLUGIN_GEN_FROM_TB);
82
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
83
index XXXXXXX..XXXXXXX 100644
84
--- a/accel/tcg/translator.c
85
+++ b/accel/tcg/translator.c
86
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
87
ops->tb_start(db, cpu);
88
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
89
90
- plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY);
91
+ plugin_enabled = plugin_gen_tb_start(cpu, db);
92
db->plugin_enabled = plugin_enabled;
93
94
while (true) {
95
diff --git a/plugins/api.c b/plugins/api.c
96
index XXXXXXX..XXXXXXX 100644
97
--- a/plugins/api.c
98
+++ b/plugins/api.c
99
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_register_vcpu_exit_cb(qemu_plugin_id_t id,
100
plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_EXIT, cb);
101
}
37
}
102
38
103
+static bool tb_is_mem_only(void)
39
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
104
+{
105
+ return tb_cflags(tcg_ctx->gen_tb) & CF_MEMI_ONLY;
106
+}
107
+
108
void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
109
qemu_plugin_vcpu_udata_cb_t cb,
110
enum qemu_plugin_cb_flags flags,
111
void *udata)
112
{
113
- if (!tb->mem_only) {
114
+ if (!tb_is_mem_only()) {
115
plugin_register_dyn_cb__udata(&tb->cbs, cb, flags, udata);
116
}
117
}
118
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
119
qemu_plugin_u64 entry,
120
uint64_t imm)
121
{
122
- if (!tb->mem_only) {
123
+ if (!tb_is_mem_only()) {
124
plugin_register_inline_op_on_entry(&tb->cbs, 0, op, entry, imm);
125
}
126
}
127
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
128
enum qemu_plugin_cb_flags flags,
129
void *udata)
130
{
131
- if (!insn->mem_only) {
132
+ if (!tb_is_mem_only()) {
133
plugin_register_dyn_cb__udata(&insn->insn_cbs, cb, flags, udata);
134
}
135
}
136
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
137
qemu_plugin_u64 entry,
138
uint64_t imm)
139
{
140
- if (!insn->mem_only) {
141
+ if (!tb_is_mem_only()) {
142
plugin_register_inline_op_on_entry(&insn->insn_cbs, 0, op, entry, imm);
143
}
144
}
145
@@ -XXX,XX +XXX,XX @@ qemu_plugin_tb_get_insn(const struct qemu_plugin_tb *tb, size_t idx)
146
return NULL;
147
}
148
insn = g_ptr_array_index(tb->insns, idx);
149
- insn->mem_only = tb->mem_only;
150
return insn;
151
}
152
153
--
40
--
154
2.34.1
41
2.43.0
155
42
156
43
1
Use the scalbn interface instead of float_muladd_halve_result.
2
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
include/exec/translator.h | 5 +++--
6
target/sparc/helper.h | 4 +-
5
accel/tcg/translator.c | 2 +-
7
target/sparc/fop_helper.c | 8 ++--
6
target/s390x/tcg/translate.c | 2 +-
8
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
7
3 files changed, 5 insertions(+), 4 deletions(-)
9
3 files changed, 54 insertions(+), 38 deletions(-)
8
10
9
diff --git a/include/exec/translator.h b/include/exec/translator.h
11
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
10
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
11
--- a/include/exec/translator.h
13
--- a/target/sparc/helper.h
12
+++ b/include/exec/translator.h
14
+++ b/target/sparc/helper.h
13
@@ -XXX,XX +XXX,XX @@ translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
14
16
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
15
/**
17
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
16
* translator_fake_ldb - fake instruction load
18
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
17
- * @insn8: byte of instruction
19
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
18
+ * @db: Disassembly context
20
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
19
* @pc: program counter of instruction
21
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
20
+ * @insn8: byte of instruction
22
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
21
*
23
22
* This is a special case helper used where the instruction we are
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
23
* about to translate comes from somewhere else (e.g. being
25
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
24
* re-synthesised for s390x "ex"). It ensures we update other areas of
26
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
25
* the translator with details of the executed instruction.
27
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
26
*/
28
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
27
-void translator_fake_ldb(uint8_t insn8, vaddr pc);
29
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
28
+void translator_fake_ldb(DisasContextBase *db, vaddr pc, uint8_t insn8);
30
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
29
31
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
30
#ifdef COMPILING_PER_TARGET
32
31
/*
33
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
32
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
33
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
34
--- a/accel/tcg/translator.c
35
--- a/target/sparc/fop_helper.c
35
+++ b/accel/tcg/translator.c
36
+++ b/target/sparc/fop_helper.c
36
@@ -XXX,XX +XXX,XX @@ uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc)
37
@@ -XXX,XX +XXX,XX @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
38
}
39
40
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
41
- float32 s2, float32 s3, uint32_t op)
42
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
43
{
44
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
45
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
46
check_ieee_exceptions(env, GETPC());
37
return ret;
47
return ret;
38
}
48
}
39
49
40
-void translator_fake_ldb(uint8_t insn8, vaddr pc)
50
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
41
+void translator_fake_ldb(DisasContextBase *db, vaddr pc, uint8_t insn8)
51
- float64 s2, float64 s3, uint32_t op)
42
{
52
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
43
plugin_insn_append(pc, &insn8, sizeof(insn8));
53
{
44
}
54
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
45
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
55
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
56
check_ieee_exceptions(env, GETPC());
57
return ret;
58
}
59
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
46
index XXXXXXX..XXXXXXX 100644
60
index XXXXXXX..XXXXXXX 100644
47
--- a/target/s390x/tcg/translate.c
61
--- a/target/sparc/translate.c
48
+++ b/target/s390x/tcg/translate.c
62
+++ b/target/sparc/translate.c
49
@@ -XXX,XX +XXX,XX @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
63
@@ -XXX,XX +XXX,XX @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
50
/* Register insn bytes with translator so plugins work. */
64
51
for (int i = 0; i < ilen; i++) {
65
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
52
uint8_t byte = extract64(insn, 56 - (i * 8), 8);
66
{
53
- translator_fake_ldb(byte, pc + i);
67
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
54
+ translator_fake_ldb(&s->base, pc + i, byte);
68
+ TCGv_i32 z = tcg_constant_i32(0);
55
}
69
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
56
op = insn >> 56;
70
}
57
} else {
71
72
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
73
{
74
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
75
+ TCGv_i32 z = tcg_constant_i32(0);
76
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
77
}
78
79
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
80
{
81
- int op = float_muladd_negate_c;
82
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
83
+ TCGv_i32 z = tcg_constant_i32(0);
84
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
85
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
86
}
87
88
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
89
{
90
- int op = float_muladd_negate_c;
91
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
92
+ TCGv_i32 z = tcg_constant_i32(0);
93
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
94
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
95
}
96
97
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
98
{
99
- int op = float_muladd_negate_c | float_muladd_negate_result;
100
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
101
+ TCGv_i32 z = tcg_constant_i32(0);
102
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
103
+ float_muladd_negate_result);
104
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
105
}
106
107
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
108
{
109
- int op = float_muladd_negate_c | float_muladd_negate_result;
110
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
111
+ TCGv_i32 z = tcg_constant_i32(0);
112
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
113
+ float_muladd_negate_result);
114
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
115
}
116
117
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
118
{
119
- int op = float_muladd_negate_result;
120
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
121
+ TCGv_i32 z = tcg_constant_i32(0);
122
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
123
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
124
}
125
126
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
127
{
128
- int op = float_muladd_negate_result;
129
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
130
+ TCGv_i32 z = tcg_constant_i32(0);
131
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
132
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
133
}
134
135
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
136
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
137
{
138
- TCGv_i32 one = tcg_constant_i32(float32_one);
139
- int op = float_muladd_halve_result;
140
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
141
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
142
+ TCGv_i32 mone = tcg_constant_i32(-1);
143
+ TCGv_i32 op = tcg_constant_i32(0);
144
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
145
}
146
147
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
148
{
149
- TCGv_i64 one = tcg_constant_i64(float64_one);
150
- int op = float_muladd_halve_result;
151
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
152
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
153
+ TCGv_i32 mone = tcg_constant_i32(-1);
154
+ TCGv_i32 op = tcg_constant_i32(0);
155
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
156
}
157
158
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
159
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
160
{
161
- TCGv_i32 one = tcg_constant_i32(float32_one);
162
- int op = float_muladd_negate_c | float_muladd_halve_result;
163
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
164
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
165
+ TCGv_i32 mone = tcg_constant_i32(-1);
166
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
167
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
168
}
169
170
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
171
{
172
- TCGv_i64 one = tcg_constant_i64(float64_one);
173
- int op = float_muladd_negate_c | float_muladd_halve_result;
174
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
175
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
176
+ TCGv_i32 mone = tcg_constant_i32(-1);
177
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
178
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
179
}
180
181
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
182
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
183
{
184
- TCGv_i32 one = tcg_constant_i32(float32_one);
185
- int op = float_muladd_negate_result | float_muladd_halve_result;
186
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
187
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
188
+ TCGv_i32 mone = tcg_constant_i32(-1);
189
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
190
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
191
}
192
193
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
194
{
195
- TCGv_i64 one = tcg_constant_i64(float64_one);
196
- int op = float_muladd_negate_result | float_muladd_halve_result;
197
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
198
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
199
+ TCGv_i32 mone = tcg_constant_i32(-1);
200
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
201
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
202
}
203
204
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
58
--
205
--
59
2.34.1
206
2.43.0
60
207
61
208
1
While there are other methods that could be used to replace
1
All uses have been converted to float*_muladd_scalbn.
2
TARGET_PAGE_MASK, the function is not really required outside
3
the context of target-specific translation.
4
5
This makes the header usable by target-independent code.
6
2
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
5
---
10
include/exec/translator.h | 2 ++
6
include/fpu/softfloat.h | 3 ---
11
1 file changed, 2 insertions(+)
7
fpu/softfloat.c | 6 ------
8
fpu/softfloat-parts.c.inc | 4 ----
9
3 files changed, 13 deletions(-)
12
10
13
diff --git a/include/exec/translator.h b/include/exec/translator.h
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/translator.h
13
--- a/include/fpu/softfloat.h
16
+++ b/include/exec/translator.h
14
+++ b/include/fpu/softfloat.h
17
@@ -XXX,XX +XXX,XX @@ translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
18
*/
16
| Using these differs from negating an input or output before calling
19
void translator_fake_ldb(uint8_t insn8, vaddr pc);
17
| the muladd function in that this means that a NaN doesn't have its
20
18
| sign bit inverted before it is propagated.
21
+#ifdef COMPILING_PER_TARGET
19
-| We also support halving the result before rounding, as a special
22
/*
20
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
23
* Return whether addr is on the same page as where disassembly started.
21
*----------------------------------------------------------------------------*/
24
* Translators can use this to enforce the rule that only single-insn
22
enum {
25
@@ -XXX,XX +XXX,XX @@ static inline bool is_same_page(const DisasContextBase *db, vaddr addr)
23
float_muladd_negate_c = 1,
26
{
24
float_muladd_negate_product = 2,
27
return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0;
25
float_muladd_negate_result = 4,
28
}
26
- float_muladd_halve_result = 8,
29
+#endif
27
};
30
28
31
#endif /* EXEC__TRANSLATOR_H */
29
/*----------------------------------------------------------------------------
30
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/fpu/softfloat.c
33
+++ b/fpu/softfloat.c
34
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
35
if (unlikely(!can_use_fpu(s))) {
36
goto soft;
37
}
38
- if (unlikely(flags & float_muladd_halve_result)) {
39
- goto soft;
40
- }
41
42
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
43
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
44
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
45
if (unlikely(!can_use_fpu(s))) {
46
goto soft;
47
}
48
- if (unlikely(flags & float_muladd_halve_result)) {
49
- goto soft;
50
- }
51
52
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
53
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
54
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
55
index XXXXXXX..XXXXXXX 100644
56
--- a/fpu/softfloat-parts.c.inc
57
+++ b/fpu/softfloat-parts.c.inc
58
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
59
a->exp = p_widen.exp;
60
61
return_normal:
62
- /* TODO: Replace all use of float_muladd_halve_result with scale. */
63
- if (flags & float_muladd_halve_result) {
64
- a->exp -= 1;
65
- }
66
a->exp += scale;
67
finish_sign:
68
if (flags & float_muladd_negate_result) {
32
--
69
--
33
2.34.1
70
2.43.0
34
71
35
72
diff view generated by jsdifflib
New patch
1
This rounding mode is used by Hexagon.
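A sketch of the observable effect (illustrative, not from the patch; assumes an initialized float_status fp_status):

    set_float_rounding_mode(float_round_nearest_even_max, &fp_status);
    float32 big = make_float32(0x7f7fffff);        /* largest finite float32 */
    float32 r = float32_add(big, big, &fp_status);
    /* overflow saturates to the maximum finite value (0x7f7fffff)
     * instead of returning +inf as float_round_nearest_even would */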
1
2
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
include/fpu/softfloat-types.h | 2 ++
6
fpu/softfloat-parts.c.inc | 3 +++
7
2 files changed, 5 insertions(+)
8
9
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
10
index XXXXXXX..XXXXXXX 100644
11
--- a/include/fpu/softfloat-types.h
12
+++ b/include/fpu/softfloat-types.h
13
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
14
float_round_to_odd = 5,
15
/* Not an IEEE rounding mode: round to closest odd, overflow to inf */
16
float_round_to_odd_inf = 6,
17
+ /* Not an IEEE rounding mode: round to nearest even, overflow to max */
18
+ float_round_nearest_even_max = 7,
19
} FloatRoundMode;
20
21
/*
22
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
23
index XXXXXXX..XXXXXXX 100644
24
--- a/fpu/softfloat-parts.c.inc
25
+++ b/fpu/softfloat-parts.c.inc
26
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
27
int exp, flags = 0;
28
29
switch (s->float_rounding_mode) {
30
+ case float_round_nearest_even_max:
31
+ overflow_norm = true;
32
+ /* fall through */
33
case float_round_nearest_even:
34
if (N > 64 && frac_lsb == 0) {
35
inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
36
--
37
2.43.0
New patch
1
Certain Hexagon instructions suppress changes to the result
2
when the product of fma() is a true zero.
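A sketch of the intended behaviour (illustrative, not from the patch; assumes an initialized float_status st):

    float32 c = float32_set_sign(float32_zero, 1);     /* -0.0 */
    float32 r = float32_muladd(float32_zero, float32_one, c,
                               float_muladd_suppress_add_product_zero, &st);
    /* the 0 * 1 product is a true zero, so C is returned unchanged and
     * r is -0.0; without the flag, +0 + -0 rounds to +0 */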
1
3
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/fpu/softfloat.h | 5 +++++
7
fpu/softfloat.c | 3 +++
8
fpu/softfloat-parts.c.inc | 4 +++-
9
3 files changed, 11 insertions(+), 1 deletion(-)
10
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/include/fpu/softfloat.h
14
+++ b/include/fpu/softfloat.h
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
16
| Using these differs from negating an input or output before calling
17
| the muladd function in that this means that a NaN doesn't have its
18
| sign bit inverted before it is propagated.
19
+|
20
+| With float_muladd_suppress_add_product_zero, if A or B is zero
21
+| such that the product is a true zero, then return C without addition.
22
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
23
*----------------------------------------------------------------------------*/
24
enum {
25
float_muladd_negate_c = 1,
26
float_muladd_negate_product = 2,
27
float_muladd_negate_result = 4,
28
+ float_muladd_suppress_add_product_zero = 8,
29
};
30
31
/*----------------------------------------------------------------------------
32
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/fpu/softfloat.c
35
+++ b/fpu/softfloat.c
36
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
37
if (unlikely(!can_use_fpu(s))) {
38
goto soft;
39
}
40
+ if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
41
+ goto soft;
42
+ }
43
44
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
45
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
46
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
47
index XXXXXXX..XXXXXXX 100644
48
--- a/fpu/softfloat-parts.c.inc
49
+++ b/fpu/softfloat-parts.c.inc
50
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
51
goto return_normal;
52
}
53
if (c->cls == float_class_zero) {
54
- if (a->sign != c->sign) {
55
+ if (flags & float_muladd_suppress_add_product_zero) {
56
+ a->sign = c->sign;
57
+ } else if (a->sign != c->sign) {
58
goto return_sub_zero;
59
}
60
goto return_zero;
61
--
62
2.43.0
1
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
1
There are no special cases for this instruction.
2
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Remove internal_mpyf as unused.
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
6
---
5
target/cris/translate.c | 25 ++++++++-----------------
7
target/hexagon/fma_emu.h | 1 -
6
1 file changed, 8 insertions(+), 17 deletions(-)
8
target/hexagon/fma_emu.c | 8 --------
9
target/hexagon/op_helper.c | 2 +-
10
3 files changed, 1 insertion(+), 10 deletions(-)
7
11
8
diff --git a/target/cris/translate.c b/target/cris/translate.c
12
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
9
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
10
--- a/target/cris/translate.c
14
--- a/target/hexagon/fma_emu.h
11
+++ b/target/cris/translate.c
15
+++ b/target/hexagon/fma_emu.h
12
@@ -XXX,XX +XXX,XX @@ static int sign_extend(unsigned int val, unsigned int width)
16
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32);
17
float32 infinite_float32(uint8_t sign);
18
float32 internal_fmafx(float32 a, float32 b, float32 c,
19
int scale, float_status *fp_status);
20
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
21
float64 internal_mpyhh(float64 a, float64 b,
22
unsigned long long int accumulated,
23
float_status *fp_status);
24
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/hexagon/fma_emu.c
27
+++ b/target/hexagon/fma_emu.c
28
@@ -XXX,XX +XXX,XX @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
29
return accum_round_float32(result, fp_status);
13
}
30
}
14
31
15
static int cris_fetch(CPUCRISState *env, DisasContext *dc, uint32_t addr,
32
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
16
- unsigned int size, unsigned int sign)
33
-{
17
+ unsigned int size, bool sign)
34
- if (float32_is_zero(a) || float32_is_zero(b)) {
35
- return float32_mul(a, b, fp_status);
36
- }
37
- return internal_fmafx(a, b, float32_zero, 0, fp_status);
38
-}
39
-
40
float64 internal_mpyhh(float64 a, float64 b,
41
unsigned long long int accumulated,
42
float_status *fp_status)
43
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/hexagon/op_helper.c
46
+++ b/target/hexagon/op_helper.c
47
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
18
{
48
{
19
int r;
49
float32 RdV;
20
50
arch_fpop_start(env);
21
switch (size) {
51
- RdV = internal_mpyf(RsV, RtV, &env->fp_status);
22
case 4:
52
+ RdV = float32_mul(RsV, RtV, &env->fp_status);
23
- {
53
arch_fpop_end(env);
24
- r = cpu_ldl_code(env, addr);
54
return RdV;
25
+ r = translator_ldl(env, &dc->base, addr);
26
break;
27
- }
28
case 2:
29
- {
30
+ r = translator_lduw(env, &dc->base, addr);
31
if (sign) {
32
- r = cpu_ldsw_code(env, addr);
33
- } else {
34
- r = cpu_lduw_code(env, addr);
35
+ r = (int16_t)r;
36
}
37
break;
38
- }
39
case 1:
40
- {
41
+ r = translator_ldub(env, &dc->base, addr);
42
if (sign) {
43
- r = cpu_ldsb_code(env, addr);
44
- } else {
45
- r = cpu_ldub_code(env, addr);
46
+ r = (int8_t)r;
47
}
48
break;
49
- }
50
default:
51
- cpu_abort(CPU(dc->cpu), "Invalid fetch size %d\n", size);
52
- break;
53
+ g_assert_not_reached();
54
}
55
return r;
56
}
55
}
57
@@ -XXX,XX +XXX,XX @@ static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
58
int i;
59
60
/* Load a halfword onto the instruction register. */
61
- dc->ir = cris_fetch(env, dc, dc->pc, 2, 0);
62
+ dc->ir = cris_fetch(env, dc, dc->pc, 2, 0);
63
64
/* Now decode it. */
65
dc->opcode = EXTRACT_FIELD(dc->ir, 4, 11);
66
--
56
--
67
2.34.1
57
2.43.0
68
69
New patch
1
There are no special cases for this instruction.
1
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/hexagon/op_helper.c | 2 +-
7
1 file changed, 1 insertion(+), 1 deletion(-)
8
9
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/hexagon/op_helper.c
12
+++ b/target/hexagon/op_helper.c
13
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
14
float32 RsV, float32 RtV)
15
{
16
arch_fpop_start(env);
17
- RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
18
+ RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
19
arch_fpop_end(env);
20
return RxV;
21
}
22
--
23
2.43.0
diff view generated by jsdifflib
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
There are no special cases for this instruction. Since Hexagon
2
always uses default-nan mode, explicitly negating the first
3
input is unnecessary. Use float_muladd_negate_product instead.
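The equivalence being relied on, sketched with generic operands (illustrative only):

    /* negating the first input ...                      (-a) * b + c  */
    float32_muladd(float32_chs(a), b, c, 0, &st);
    /* ... is equivalent to negating the product ...     -(a * b) + c  */
    float32_muladd(a, b, c, float_muladd_negate_product, &st);
    /* the results can differ only in the sign of a propagated NaN,
     * which default-NaN mode discards, so the flag form is safe here */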
4
5
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
7
---
4
target/s390x/tcg/translate.c | 3 +--
8
target/hexagon/op_helper.c | 5 ++---
5
1 file changed, 1 insertion(+), 2 deletions(-)
9
1 file changed, 2 insertions(+), 3 deletions(-)
6
10
7
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
11
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
8
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
9
--- a/target/s390x/tcg/translate.c
13
--- a/target/hexagon/op_helper.c
10
+++ b/target/s390x/tcg/translate.c
14
+++ b/target/hexagon/op_helper.c
11
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
12
#include "tcg/tcg-op-gvec.h"
16
float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
13
#include "qemu/log.h"
17
float32 RsV, float32 RtV)
14
#include "qemu/host-utils.h"
15
-#include "exec/cpu_ldst.h"
16
#include "exec/helper-proto.h"
17
#include "exec/helper-gen.h"
18
19
@@ -XXX,XX +XXX,XX @@ static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
20
static target_ulong get_next_pc(CPUS390XState *env, DisasContext *s,
21
uint64_t pc)
22
{
18
{
23
- uint64_t insn = cpu_lduw_code(env, pc);
19
- float32 neg_RsV;
24
+ uint64_t insn = translator_lduw(env, &s->base, pc);
20
arch_fpop_start(env);
25
21
- neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
26
return pc + get_ilen((insn >> 8) & 0xff);
22
- RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
23
+ RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
24
+ &env->fp_status);
25
arch_fpop_end(env);
26
return RxV;
27
}
27
}
28
--
28
--
29
2.34.1
29
2.43.0
30
31
1
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
1
This instruction has a special case that 0 * x + c returns c
2
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
without the normal sign folding that comes with 0 + -0.
3
Use the new float_muladd_suppress_add_product_zero to
4
describe this.
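To make the corner case concrete (illustration only, not part of the patch): under round-to-nearest-even, IEEE addition of zeros with opposite signs gives +0.0, so a plain fused 0.0 * x + (-0.0) would return +0.0, whereas this instruction returns the addend unchanged, i.e. -0.0. A hedged sketch of the call with the new flag, where "scale" stands in for the fSXTN(8, 64, PuV) value used below:

    /* Sketch: with the flag set, a zero product makes muladd return
     * RxV as-is, so 0.0 * x + (-0.0) yields -0.0 rather than +0.0. */
    RxV = float32_muladd_scalbn(RsV, RtV, RxV, scale,
                                float_muladd_suppress_add_product_zero,
                                &env->fp_status);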
5
6
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
8
---
5
target/cris/translate.c | 1 -
9
target/hexagon/op_helper.c | 11 +++--------
6
target/cris/translate_v10.c.inc | 30 +++++++++---------------------
10
1 file changed, 3 insertions(+), 8 deletions(-)
7
2 files changed, 9 insertions(+), 22 deletions(-)
8
11
9
diff --git a/target/cris/translate.c b/target/cris/translate.c
12
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
10
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
11
--- a/target/cris/translate.c
14
--- a/target/hexagon/op_helper.c
12
+++ b/target/cris/translate.c
15
+++ b/target/hexagon/op_helper.c
13
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
14
#include "tcg/tcg-op.h"
17
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
15
#include "exec/helper-proto.h"
18
float32 RsV, float32 RtV, float32 PuV)
16
#include "mmu.h"
19
{
17
-#include "exec/cpu_ldst.h"
20
- size4s_t tmp;
18
#include "exec/translator.h"
21
arch_fpop_start(env);
19
#include "crisv32-decode.h"
22
- RxV = check_nan(RxV, RxV, &env->fp_status);
20
#include "qemu/qemu-print.h"
23
- RxV = check_nan(RxV, RsV, &env->fp_status);
21
diff --git a/target/cris/translate_v10.c.inc b/target/cris/translate_v10.c.inc
24
- RxV = check_nan(RxV, RtV, &env->fp_status);
22
index XXXXXXX..XXXXXXX 100644
25
- tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
23
--- a/target/cris/translate_v10.c.inc
26
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
24
+++ b/target/cris/translate_v10.c.inc
27
- RxV = tmp;
25
@@ -XXX,XX +XXX,XX @@ static int dec10_prep_move_m(CPUCRISState *env, DisasContext *dc,
28
- }
26
29
+ RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
27
/* Load [$rs] onto T1. */
30
+ float_muladd_suppress_add_product_zero,
28
if (is_imm) {
31
+ &env->fp_status);
29
- if (memsize != 4) {
32
arch_fpop_end(env);
30
- if (s_ext) {
33
return RxV;
31
- if (memsize == 1)
34
}
32
- imm = cpu_ldsb_code(env, dc->pc + 2);
33
- else
34
- imm = cpu_ldsw_code(env, dc->pc + 2);
35
- } else {
36
- if (memsize == 1)
37
- imm = cpu_ldub_code(env, dc->pc + 2);
38
- else
39
- imm = cpu_lduw_code(env, dc->pc + 2);
40
- }
41
- } else
42
- imm = cpu_ldl_code(env, dc->pc + 2);
43
+ imm = cris_fetch(env, dc, dc->pc + 2, memsize, s_ext);
44
45
tcg_gen_movi_tl(dst, imm);
46
47
@@ -XXX,XX +XXX,XX @@ static int dec10_dip(CPUCRISState *env, DisasContext *dc)
48
LOG_DIS("dip pc=%x opcode=%d r%d r%d\n",
49
dc->pc, dc->opcode, dc->src, dc->dst);
50
if (dc->src == 15) {
51
- imm = cpu_ldl_code(env, dc->pc + 2);
52
+ imm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
53
tcg_gen_movi_tl(cpu_PR[PR_PREFIX], imm);
54
- if (dc->postinc)
55
+ if (dc->postinc) {
56
insn_len += 4;
57
+ }
58
tcg_gen_addi_tl(cpu_R[15], cpu_R[15], insn_len - 2);
59
} else {
60
gen_load(dc, cpu_PR[PR_PREFIX], cpu_R[dc->src], 4, 0);
61
@@ -XXX,XX +XXX,XX @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
62
if (dc->src == 15) {
63
LOG_DIS("jump.%d %d r%d r%d direct\n", size,
64
dc->opcode, dc->src, dc->dst);
65
- imm = cpu_ldl_code(env, dc->pc + 2);
66
- if (dc->mode == CRISV10_MODE_AUTOINC)
67
+ imm = cris_fetch(env, dc, dc->pc + 2, size, 0);
68
+ if (dc->mode == CRISV10_MODE_AUTOINC) {
69
insn_len += size;
70
-
71
+ }
72
c = tcg_constant_tl(dc->pc + insn_len);
73
t_gen_mov_preg_TN(dc, dc->dst, c);
74
dc->jmp_pc = imm;
75
@@ -XXX,XX +XXX,XX @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
76
case CRISV10_IND_BCC_M:
77
78
cris_cc_mask(dc, 0);
79
- simm = cpu_ldsw_code(env, dc->pc + 2);
80
+ simm = cris_fetch(env, dc, dc->pc + 2, 2, 1);
81
simm += 4;
82
83
LOG_DIS("bcc_m: b%s %x\n", cc_name(dc->cond), dc->pc + simm);
84
@@ -XXX,XX +XXX,XX @@ static unsigned int crisv10_decoder(CPUCRISState *env, DisasContext *dc)
85
unsigned int insn_len = 2;
86
87
/* Load a halfword onto the instruction register. */
88
- dc->ir = cpu_lduw_code(env, dc->pc);
89
+ dc->ir = cris_fetch(env, dc, dc->pc, 2, 0);
90
91
/* Now decode it. */
92
dc->opcode = EXTRACT_FIELD(dc->ir, 6, 9);
93
--
35
--
94
2.34.1
36
2.43.0
95
96
1
Use the bytes that we record for the entire TB, rather than
1
There are multiple special cases for this instruction.
2
a per-insn GByteArray. Record the length of the insn in
2
(1) The saturate to normal maximum instead of overflow to infinity is
3
plugin_gen_insn_end rather than inferring from the length
3
handled by the new float_round_nearest_even_max rounding mode.
4
of the array.
4
(2) The 0 * n + c special case is handled by the new
5
float_muladd_suppress_add_product_zero flag.
6
(3) The Inf - Inf -> 0 special case can be detected after the fact
7
by examining float_flag_invalid_isi.
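For reference (illustration only, not part of the patch): float_round_nearest_even_max behaves like nearest-even except that an overflow saturates to the largest finite value instead of returning infinity, which covers case (1); case (3) works because the fused operation reports an Inf - Inf invalid operation through a dedicated flag bit that can be tested after the fact. A hedged sketch of that detection step, using the flag names from this series:

    /* Sketch: read back the accumulated exception flags, clear them
     * (this instruction suppresses them), and map Inf - Inf to zero. */
    int flags = get_float_exception_flags(&env->fp_status);
    if (flags) {
        set_float_exception_flags(0, &env->fp_status);
        if (flags & float_flag_invalid_isi) {
            RxV = 0;    /* Inf - Inf -> 0 on Hexagon */
        }
    }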
5
8
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
11
---
9
include/qemu/plugin.h | 14 +-------------
12
target/hexagon/op_helper.c | 105 +++++++++----------------------------
10
accel/tcg/plugin-gen.c | 7 +++++--
13
1 file changed, 26 insertions(+), 79 deletions(-)
11
accel/tcg/translator.c | 26 --------------------------
12
plugins/api.c | 12 +++++++-----
13
tcg/tcg.c | 3 +--
14
5 files changed, 14 insertions(+), 48 deletions(-)
15
14
16
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
15
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
17
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
18
--- a/include/qemu/plugin.h
17
--- a/target/hexagon/op_helper.c
19
+++ b/include/qemu/plugin.h
18
+++ b/target/hexagon/op_helper.c
20
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_dyn_cb {
19
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
21
20
return RxV;
22
/* Internal context for instrumenting an instruction */
21
}
23
struct qemu_plugin_insn {
22
24
- GByteArray *data;
23
-static bool is_zero_prod(float32 a, float32 b)
25
uint64_t vaddr;
26
void *haddr;
27
GArray *insn_cbs;
28
GArray *mem_cbs;
29
+ uint8_t len;
30
bool calls_helpers;
31
32
/* if set, the instruction calls helpers that might access guest memory */
33
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_scoreboard {
34
QLIST_ENTRY(qemu_plugin_scoreboard) entry;
35
};
36
37
-/*
38
- * qemu_plugin_insn allocate and cleanup functions. We don't expect to
39
- * cleanup many of these structures. They are reused for each fresh
40
- * translation.
41
- */
42
-
43
-static inline void qemu_plugin_insn_cleanup_fn(gpointer data)
44
-{
24
-{
45
- struct qemu_plugin_insn *insn = (struct qemu_plugin_insn *) data;
25
- return ((float32_is_zero(a) && is_finite(b)) ||
46
- g_byte_array_free(insn->data, true);
26
- (float32_is_zero(b) && is_finite(a)));
47
-}
27
-}
48
-
28
-
49
/* Internal context for this TranslationBlock */
29
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
50
struct qemu_plugin_tb {
51
GPtrArray *insns;
52
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/accel/tcg/plugin-gen.c
55
+++ b/accel/tcg/plugin-gen.c
56
@@ -XXX,XX +XXX,XX @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
57
ptb->n = n;
58
if (n <= ptb->insns->len) {
59
insn = g_ptr_array_index(ptb->insns, n - 1);
60
- g_byte_array_set_size(insn->data, 0);
61
} else {
62
assert(n - 1 == ptb->insns->len);
63
insn = g_new0(struct qemu_plugin_insn, 1);
64
- insn->data = g_byte_array_sized_new(4);
65
g_ptr_array_add(ptb->insns, insn);
66
}
67
68
@@ -XXX,XX +XXX,XX @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
69
70
void plugin_gen_insn_end(void)
71
{
72
+ const DisasContextBase *db = tcg_ctx->plugin_db;
73
+ struct qemu_plugin_insn *pinsn = tcg_ctx->plugin_insn;
74
+
75
+ pinsn->len = db->fake_insn ? db->record_len : db->pc_next - pinsn->vaddr;
76
+
77
tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_INSN);
78
}
79
80
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
81
index XXXXXXX..XXXXXXX 100644
82
--- a/accel/tcg/translator.c
83
+++ b/accel/tcg/translator.c
84
@@ -XXX,XX +XXX,XX @@ bool translator_st(const DisasContextBase *db, void *dest,
85
return false;
86
}
87
88
-static void plugin_insn_append(vaddr pc, const void *from, size_t size)
89
-{
30
-{
90
-#ifdef CONFIG_PLUGIN
31
- float32 ret = dst;
91
- struct qemu_plugin_insn *insn = tcg_ctx->plugin_insn;
32
- if (float32_is_any_nan(x)) {
92
- size_t off;
33
- if (extract32(x, 22, 1) == 0) {
93
-
34
- float_raise(float_flag_invalid, fp_status);
94
- if (insn == NULL) {
35
- }
95
- return;
36
- ret = make_float32(0xffffffff); /* nan */
96
- }
37
- }
97
- off = pc - insn->vaddr;
38
- return ret;
98
- if (off < insn->data->len) {
99
- g_byte_array_set_size(insn->data, off);
100
- } else if (off > insn->data->len) {
101
- /* we have an unexpected gap */
102
- g_assert_not_reached();
103
- }
104
-
105
- insn->data = g_byte_array_append(insn->data, from, size);
106
-#endif
107
-}
39
-}
108
-
40
-
109
uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc)
41
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
42
float32 RsV, float32 RtV, float32 PuV)
110
{
43
{
111
uint8_t raw;
44
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
112
@@ -XXX,XX +XXX,XX @@ uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc)
45
return RxV;
113
raw = cpu_ldub_code(env, pc);
114
record_save(db, pc, &raw, sizeof(raw));
115
}
116
- plugin_insn_append(pc, &raw, sizeof(raw));
117
return raw;
118
}
46
}
119
47
120
@@ -XXX,XX +XXX,XX @@ uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, vaddr pc)
48
-static bool is_inf_prod(int32_t a, int32_t b)
121
raw = tswap16(tgt);
49
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
122
record_save(db, pc, &raw, sizeof(raw));
50
+ float32 RsV, float32 RtV, int negate)
123
}
51
{
124
- plugin_insn_append(pc, &raw, sizeof(raw));
52
- return (float32_is_infinity(a) && float32_is_infinity(b)) ||
125
return tgt;
53
- (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
54
- (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
55
+ int flags;
56
+
57
+ arch_fpop_start(env);
58
+
59
+ set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
60
+ RxV = float32_muladd(RsV, RtV, RxV,
61
+ negate | float_muladd_suppress_add_product_zero,
62
+ &env->fp_status);
63
+
64
+ flags = get_float_exception_flags(&env->fp_status);
65
+ if (flags) {
66
+ /* Flags are suppressed by this instruction. */
67
+ set_float_exception_flags(0, &env->fp_status);
68
+
69
+ /* Return 0 for Inf - Inf. */
70
+ if (flags & float_flag_invalid_isi) {
71
+ RxV = 0;
72
+ }
73
+ }
74
+
75
+ arch_fpop_end(env);
76
+ return RxV;
126
}
77
}
127
78
128
@@ -XXX,XX +XXX,XX @@ uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, vaddr pc)
79
float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
129
raw = tswap32(tgt);
80
float32 RsV, float32 RtV)
130
record_save(db, pc, &raw, sizeof(raw));
81
{
131
}
82
- bool infinp;
132
- plugin_insn_append(pc, &raw, sizeof(raw));
83
- bool infminusinf;
133
return tgt;
84
- float32 tmp;
85
-
86
- arch_fpop_start(env);
87
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
88
- infminusinf = float32_is_infinity(RxV) &&
89
- is_inf_prod(RsV, RtV) &&
90
- (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
91
- infinp = float32_is_infinity(RxV) ||
92
- float32_is_infinity(RtV) ||
93
- float32_is_infinity(RsV);
94
- RxV = check_nan(RxV, RxV, &env->fp_status);
95
- RxV = check_nan(RxV, RsV, &env->fp_status);
96
- RxV = check_nan(RxV, RtV, &env->fp_status);
97
- tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
98
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
99
- RxV = tmp;
100
- }
101
- set_float_exception_flags(0, &env->fp_status);
102
- if (float32_is_infinity(RxV) && !infinp) {
103
- RxV = RxV - 1;
104
- }
105
- if (infminusinf) {
106
- RxV = 0;
107
- }
108
- arch_fpop_end(env);
109
- return RxV;
110
+ return do_sffma_lib(env, RxV, RsV, RtV, 0);
134
}
111
}
135
112
136
@@ -XXX,XX +XXX,XX @@ uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc)
113
float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
137
raw = tswap64(tgt);
114
float32 RsV, float32 RtV)
138
record_save(db, pc, &raw, sizeof(raw));
115
{
139
}
116
- bool infinp;
140
- plugin_insn_append(pc, &raw, sizeof(raw));
117
- bool infminusinf;
141
return tgt;
118
- float32 tmp;
119
-
120
- arch_fpop_start(env);
121
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
122
- infminusinf = float32_is_infinity(RxV) &&
123
- is_inf_prod(RsV, RtV) &&
124
- (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
125
- infinp = float32_is_infinity(RxV) ||
126
- float32_is_infinity(RtV) ||
127
- float32_is_infinity(RsV);
128
- RxV = check_nan(RxV, RxV, &env->fp_status);
129
- RxV = check_nan(RxV, RsV, &env->fp_status);
130
- RxV = check_nan(RxV, RtV, &env->fp_status);
131
- float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
132
- tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
133
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
134
- RxV = tmp;
135
- }
136
- set_float_exception_flags(0, &env->fp_status);
137
- if (float32_is_infinity(RxV) && !infinp) {
138
- RxV = RxV - 1;
139
- }
140
- if (infminusinf) {
141
- RxV = 0;
142
- }
143
- arch_fpop_end(env);
144
- return RxV;
145
+ return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
142
}
146
}
143
147
144
@@ -XXX,XX +XXX,XX @@ void translator_fake_ldb(DisasContextBase *db, vaddr pc, uint8_t insn8)
148
float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
145
assert(pc >= db->pc_first);
146
db->fake_insn = true;
147
record_save(db, pc, &insn8, sizeof(insn8));
148
- plugin_insn_append(pc, &insn8, sizeof(insn8));
149
}
150
diff --git a/plugins/api.c b/plugins/api.c
151
index XXXXXXX..XXXXXXX 100644
152
--- a/plugins/api.c
153
+++ b/plugins/api.c
154
@@ -XXX,XX +XXX,XX @@
155
#include "tcg/tcg.h"
156
#include "exec/exec-all.h"
157
#include "exec/gdbstub.h"
158
+#include "exec/translator.h"
159
#include "disas/disas.h"
160
#include "plugin.h"
161
#ifndef CONFIG_USER_ONLY
162
@@ -XXX,XX +XXX,XX @@ qemu_plugin_tb_get_insn(const struct qemu_plugin_tb *tb, size_t idx)
163
size_t qemu_plugin_insn_data(const struct qemu_plugin_insn *insn,
164
void *dest, size_t len)
165
{
166
- len = MIN(len, insn->data->len);
167
- memcpy(dest, insn->data->data, len);
168
- return len;
169
+ const DisasContextBase *db = tcg_ctx->plugin_db;
170
+
171
+ len = MIN(len, insn->len);
172
+ return translator_st(db, dest, insn->vaddr, len) ? len : 0;
173
}
174
175
size_t qemu_plugin_insn_size(const struct qemu_plugin_insn *insn)
176
{
177
- return insn->data->len;
178
+ return insn->len;
179
}
180
181
uint64_t qemu_plugin_insn_vaddr(const struct qemu_plugin_insn *insn)
182
@@ -XXX,XX +XXX,XX @@ void *qemu_plugin_insn_haddr(const struct qemu_plugin_insn *insn)
183
char *qemu_plugin_insn_disas(const struct qemu_plugin_insn *insn)
184
{
185
CPUState *cpu = current_cpu;
186
- return plugin_disas(cpu, insn->vaddr, insn->data->len);
187
+ return plugin_disas(cpu, insn->vaddr, insn->len);
188
}
189
190
const char *qemu_plugin_insn_symbol(const struct qemu_plugin_insn *insn)
191
diff --git a/tcg/tcg.c b/tcg/tcg.c
192
index XXXXXXX..XXXXXXX 100644
193
--- a/tcg/tcg.c
194
+++ b/tcg/tcg.c
195
@@ -XXX,XX +XXX,XX @@ static void alloc_tcg_plugin_context(TCGContext *s)
196
{
197
#ifdef CONFIG_PLUGIN
198
s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
199
- s->plugin_tb->insns =
200
- g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
201
+ s->plugin_tb->insns = g_ptr_array_new();
202
#endif
203
}
204
205
--
149
--
206
2.34.1
150
2.43.0
207
208
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
The function is now unused.
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
5
---
4
target/i386/tcg/translate.c | 8 +++-----
6
target/hexagon/fma_emu.h | 2 -
5
1 file changed, 3 insertions(+), 5 deletions(-)
7
target/hexagon/fma_emu.c | 171 ---------------------------------------
8
2 files changed, 173 deletions(-)
6
9
7
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
10
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/target/i386/tcg/translate.c
12
--- a/target/hexagon/fma_emu.h
10
+++ b/target/i386/tcg/translate.c
13
+++ b/target/hexagon/fma_emu.h
11
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float32_getexp_raw(float32 f32)
12
#include "exec/exec-all.h"
15
}
13
#include "tcg/tcg-op.h"
16
int32_t float32_getexp(float32 f32);
14
#include "tcg/tcg-op-gvec.h"
17
float32 infinite_float32(uint8_t sign);
15
-#include "exec/cpu_ldst.h"
18
-float32 internal_fmafx(float32 a, float32 b, float32 c,
16
#include "exec/translator.h"
19
- int scale, float_status *fp_status);
17
#include "fpu/softfloat.h"
20
float64 internal_mpyhh(float64 a, float64 b,
18
21
unsigned long long int accumulated,
19
@@ -XXX,XX +XXX,XX @@ static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
22
float_status *fp_status);
20
* This can happen even if the operand is only one byte long!
23
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
21
*/
24
index XXXXXXX..XXXXXXX 100644
22
if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
25
--- a/target/hexagon/fma_emu.c
23
- volatile uint8_t unused =
26
+++ b/target/hexagon/fma_emu.c
24
- cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
27
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
25
- (void) unused;
28
return -1;
26
+ (void)translator_ldub(env, &s->base,
29
}
27
+ (s->pc - 1) & TARGET_PAGE_MASK);
30
28
}
31
-static uint64_t float32_getmant(float32 f32)
29
siglongjmp(s->jmpbuf, 1);
32
-{
30
}
33
- Float a = { .i = f32 };
31
@@ -XXX,XX +XXX,XX @@ static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
34
- if (float32_is_normal(f32)) {
32
35
- return a.mant | 1ULL << 23;
33
fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
36
- }
34
for (; pc < end; ++pc) {
37
- if (float32_is_zero(f32)) {
35
- fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
38
- return 0;
36
+ fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc));
39
- }
37
}
40
- if (float32_is_denormal(f32)) {
38
fprintf(logfile, "\n");
41
- return a.mant;
39
qemu_log_unlock(logfile);
42
- }
43
- return ~0ULL;
44
-}
45
-
46
int32_t float32_getexp(float32 f32)
47
{
48
Float a = { .i = f32 };
49
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
50
}
51
52
/* Return a maximum finite value with the requested sign */
53
-static float32 maxfinite_float32(uint8_t sign)
54
-{
55
- if (sign) {
56
- return make_float32(SF_MINUS_MAXF);
57
- } else {
58
- return make_float32(SF_MAXF);
59
- }
60
-}
61
-
62
-/* Return a zero value with requested sign */
63
-static float32 zero_float32(uint8_t sign)
64
-{
65
- if (sign) {
66
- return make_float32(0x80000000);
67
- } else {
68
- return float32_zero;
69
- }
70
-}
71
-
72
#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
73
static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
74
{ \
75
@@ -XXX,XX +XXX,XX @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
76
}
77
78
GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
79
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
80
-
81
-static bool is_inf_prod(float64 a, float64 b)
82
-{
83
- return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
84
- (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
85
- (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
86
-}
87
-
88
-static float64 special_fma(float64 a, float64 b, float64 c,
89
- float_status *fp_status)
90
-{
91
- float64 ret = make_float64(0);
92
-
93
- /*
94
- * If A multiplied by B is an exact infinity and C is also an infinity
95
- * but with the opposite sign, FMA returns NaN and raises invalid.
96
- */
97
- uint8_t a_sign = float64_is_neg(a);
98
- uint8_t b_sign = float64_is_neg(b);
99
- uint8_t c_sign = float64_is_neg(c);
100
- if (is_inf_prod(a, b) && float64_is_infinity(c)) {
101
- if ((a_sign ^ b_sign) != c_sign) {
102
- ret = make_float64(DF_NAN);
103
- float_raise(float_flag_invalid, fp_status);
104
- return ret;
105
- }
106
- }
107
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
108
- (float64_is_zero(a) && float64_is_infinity(b))) {
109
- ret = make_float64(DF_NAN);
110
- float_raise(float_flag_invalid, fp_status);
111
- return ret;
112
- }
113
- /*
114
- * If none of the above checks are true and C is a NaN,
115
- * a NaN shall be returned
116
- * If A or B are NaN, a NAN shall be returned.
117
- */
118
- if (float64_is_any_nan(a) ||
119
- float64_is_any_nan(b) ||
120
- float64_is_any_nan(c)) {
121
- if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
122
- float_raise(float_flag_invalid, fp_status);
123
- }
124
- if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
125
- float_raise(float_flag_invalid, fp_status);
126
- }
127
- if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
128
- float_raise(float_flag_invalid, fp_status);
129
- }
130
- ret = make_float64(DF_NAN);
131
- return ret;
132
- }
133
- /*
134
- * We have checked for adding opposite-signed infinities.
135
- * Other infinities return infinity with the correct sign
136
- */
137
- if (float64_is_infinity(c)) {
138
- ret = infinite_float64(c_sign);
139
- return ret;
140
- }
141
- if (float64_is_infinity(a) || float64_is_infinity(b)) {
142
- ret = infinite_float64(a_sign ^ b_sign);
143
- return ret;
144
- }
145
- g_assert_not_reached();
146
-}
147
-
148
-static float32 special_fmaf(float32 a, float32 b, float32 c,
149
- float_status *fp_status)
150
-{
151
- float64 aa, bb, cc;
152
- aa = float32_to_float64(a, fp_status);
153
- bb = float32_to_float64(b, fp_status);
154
- cc = float32_to_float64(c, fp_status);
155
- return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
156
-}
157
-
158
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
159
- float_status *fp_status)
160
-{
161
- Accum prod;
162
- Accum acc;
163
- Accum result;
164
- accum_init(&prod);
165
- accum_init(&acc);
166
- accum_init(&result);
167
-
168
- uint8_t a_sign = float32_is_neg(a);
169
- uint8_t b_sign = float32_is_neg(b);
170
- uint8_t c_sign = float32_is_neg(c);
171
- if (float32_is_infinity(a) ||
172
- float32_is_infinity(b) ||
173
- float32_is_infinity(c)) {
174
- return special_fmaf(a, b, c, fp_status);
175
- }
176
- if (float32_is_any_nan(a) ||
177
- float32_is_any_nan(b) ||
178
- float32_is_any_nan(c)) {
179
- return special_fmaf(a, b, c, fp_status);
180
- }
181
- if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
182
- float32 tmp = float32_mul(a, b, fp_status);
183
- tmp = float32_add(tmp, c, fp_status);
184
- return tmp;
185
- }
186
-
187
- /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
188
- prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
189
-
190
- /*
191
- * Note: extracting the mantissa into an int is multiplying by
192
- * 2**23, so adjust here
193
- */
194
- prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
195
- prod.sign = a_sign ^ b_sign;
196
- if (float32_is_zero(a) || float32_is_zero(b)) {
197
- prod.exp = -2 * WAY_BIG_EXP;
198
- }
199
- if ((scale > 0) && float32_is_denormal(c)) {
200
- acc.mant = int128_mul_6464(0, 0);
201
- acc.exp = -WAY_BIG_EXP;
202
- acc.sign = c_sign;
203
- acc.sticky = 1;
204
- result = accum_add(prod, acc);
205
- } else if (!float32_is_zero(c)) {
206
- acc.mant = int128_mul_6464(float32_getmant(c), 1);
207
- acc.exp = float32_getexp(c);
208
- acc.sign = c_sign;
209
- result = accum_add(prod, acc);
210
- } else {
211
- result = prod;
212
- }
213
- result.exp += scale;
214
- return accum_round_float32(result, fp_status);
215
-}
216
217
float64 internal_mpyhh(float64 a, float64 b,
218
unsigned long long int accumulated,
40
--
219
--
41
2.34.1
220
2.43.0
42
43
1
Replace translator_fake_ldb, which required multiple calls,
1
This massive macro is now only used once.
2
with translator_fake_ld, which can take all data at once.
2
Expand it for use only by float64.
3
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
include/exec/translator.h | 8 ++++----
7
target/hexagon/fma_emu.c | 255 +++++++++++++++++++--------------------
8
accel/tcg/translator.c | 5 ++---
8
1 file changed, 127 insertions(+), 128 deletions(-)
9
target/s390x/tcg/translate.c | 8 ++++----
10
3 files changed, 10 insertions(+), 11 deletions(-)
11
9
12
diff --git a/include/exec/translator.h b/include/exec/translator.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
13
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
14
--- a/include/exec/translator.h
12
--- a/target/hexagon/fma_emu.c
15
+++ b/include/exec/translator.h
13
+++ b/target/hexagon/fma_emu.c
16
@@ -XXX,XX +XXX,XX @@ translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
14
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
17
}
15
}
18
16
19
/**
17
/* Return a maximum finite value with the requested sign */
20
- * translator_fake_ldb - fake instruction load
18
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
21
+ * translator_fake_ld - fake instruction load
19
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
22
* @db: Disassembly context
20
-{ \
23
- * @pc: program counter of instruction
21
- if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
24
- * @insn8: byte of instruction
22
- && ((a.guard | a.round | a.sticky) == 0)) { \
25
+ * @data: bytes of instruction
23
- /* result zero */ \
26
+ * @len: number of bytes
24
- switch (fp_status->float_rounding_mode) { \
27
*
25
- case float_round_down: \
28
* This is a special case helper used where the instruction we are
26
- return zero_##SUFFIX(1); \
29
* about to translate comes from somewhere else (e.g. being
27
- default: \
30
* re-synthesised for s390x "ex"). It ensures we update other areas of
28
- return zero_##SUFFIX(0); \
31
* the translator with details of the executed instruction.
29
- } \
32
*/
30
- } \
33
-void translator_fake_ldb(DisasContextBase *db, vaddr pc, uint8_t insn8);
31
- /* Normalize right */ \
34
+void translator_fake_ld(DisasContextBase *db, const void *data, size_t len);
32
- /* We want MANTBITS bits of mantissa plus the leading one. */ \
35
33
- /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
36
/**
34
- /* So we need to normalize right while the high word is non-zero and \
37
* translator_st
35
- * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
38
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
36
- while ((int128_gethi(a.mant) != 0) || \
39
index XXXXXXX..XXXXXXX 100644
37
- ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
40
--- a/accel/tcg/translator.c
38
- a = accum_norm_right(a, 1); \
41
+++ b/accel/tcg/translator.c
39
- } \
42
@@ -XXX,XX +XXX,XX @@ uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc)
40
- /* \
43
return tgt;
41
- * OK, now normalize left \
42
- * We want to normalize left until we have a leading one in bit 24 \
43
- * Theoretically, we only need to shift a maximum of one to the left if we \
44
- * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
45
- * should be 0 \
46
- */ \
47
- while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
48
- a = accum_norm_left(a); \
49
- } \
50
- /* \
51
- * OK, now we might need to denormalize because of potential underflow. \
52
- * We need to do this before rounding, and rounding might make us normal \
53
- * again \
54
- */ \
55
- while (a.exp <= 0) { \
56
- a = accum_norm_right(a, 1 - a.exp); \
57
- /* \
58
- * Do we have underflow? \
59
- * That's when we get an inexact answer because we ran out of bits \
60
- * in a denormal. \
61
- */ \
62
- if (a.guard || a.round || a.sticky) { \
63
- float_raise(float_flag_underflow, fp_status); \
64
- } \
65
- } \
66
- /* OK, we're relatively canonical... now we need to round */ \
67
- if (a.guard || a.round || a.sticky) { \
68
- float_raise(float_flag_inexact, fp_status); \
69
- switch (fp_status->float_rounding_mode) { \
70
- case float_round_to_zero: \
71
- /* Chop and we're done */ \
72
- break; \
73
- case float_round_up: \
74
- if (a.sign == 0) { \
75
- a.mant = int128_add(a.mant, int128_one()); \
76
- } \
77
- break; \
78
- case float_round_down: \
79
- if (a.sign != 0) { \
80
- a.mant = int128_add(a.mant, int128_one()); \
81
- } \
82
- break; \
83
- default: \
84
- if (a.round || a.sticky) { \
85
- /* round up if guard is 1, down if guard is zero */ \
86
- a.mant = int128_add(a.mant, int128_make64(a.guard)); \
87
- } else if (a.guard) { \
88
- /* exactly .5, round up if odd */ \
89
- a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
90
- } \
91
- break; \
92
- } \
93
- } \
94
- /* \
95
- * OK, now we might have carried all the way up. \
96
- * So we might need to shr once \
97
- * at least we know that the lsb should be zero if we rounded and \
98
- * got a carry out... \
99
- */ \
100
- if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
101
- a = accum_norm_right(a, 1); \
102
- } \
103
- /* Overflow? */ \
104
- if (a.exp >= INF_EXP) { \
105
- /* Yep, inf result */ \
106
- float_raise(float_flag_overflow, fp_status); \
107
- float_raise(float_flag_inexact, fp_status); \
108
- switch (fp_status->float_rounding_mode) { \
109
- case float_round_to_zero: \
110
- return maxfinite_##SUFFIX(a.sign); \
111
- case float_round_up: \
112
- if (a.sign == 0) { \
113
- return infinite_##SUFFIX(a.sign); \
114
- } else { \
115
- return maxfinite_##SUFFIX(a.sign); \
116
- } \
117
- case float_round_down: \
118
- if (a.sign != 0) { \
119
- return infinite_##SUFFIX(a.sign); \
120
- } else { \
121
- return maxfinite_##SUFFIX(a.sign); \
122
- } \
123
- default: \
124
- return infinite_##SUFFIX(a.sign); \
125
- } \
126
- } \
127
- /* Underflow? */ \
128
- if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
129
- /* Leading one means: No, we're normal. So, we should be done... */ \
130
- INTERNAL_TYPE ret; \
131
- ret.i = 0; \
132
- ret.sign = a.sign; \
133
- ret.exp = a.exp; \
134
- ret.mant = int128_getlo(a.mant); \
135
- return ret.i; \
136
- } \
137
- assert(a.exp == 1); \
138
- INTERNAL_TYPE ret; \
139
- ret.i = 0; \
140
- ret.sign = a.sign; \
141
- ret.exp = 0; \
142
- ret.mant = int128_getlo(a.mant); \
143
- return ret.i; \
144
+static float64 accum_round_float64(Accum a, float_status *fp_status)
145
+{
146
+ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
147
+ && ((a.guard | a.round | a.sticky) == 0)) {
148
+ /* result zero */
149
+ switch (fp_status->float_rounding_mode) {
150
+ case float_round_down:
151
+ return zero_float64(1);
152
+ default:
153
+ return zero_float64(0);
154
+ }
155
+ }
156
+ /*
157
+ * Normalize right
158
+ * We want DF_MANTBITS bits of mantissa plus the leading one.
159
+ * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
160
+ * So we need to normalize right while the high word is non-zero and
161
+ * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
162
+ */
163
+ while ((int128_gethi(a.mant) != 0) ||
164
+ ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
165
+ a = accum_norm_right(a, 1);
166
+ }
167
+ /*
168
+ * OK, now normalize left
169
+ * We want to normalize left until we have a leading one in bit 24
170
+ * Theoretically, we only need to shift a maximum of one to the left if we
171
+ * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
172
+ * should be 0
173
+ */
174
+ while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
175
+ a = accum_norm_left(a);
176
+ }
177
+ /*
178
+ * OK, now we might need to denormalize because of potential underflow.
179
+ * We need to do this before rounding, and rounding might make us normal
180
+ * again
181
+ */
182
+ while (a.exp <= 0) {
183
+ a = accum_norm_right(a, 1 - a.exp);
184
+ /*
185
+ * Do we have underflow?
186
+ * That's when we get an inexact answer because we ran out of bits
187
+ * in a denormal.
188
+ */
189
+ if (a.guard || a.round || a.sticky) {
190
+ float_raise(float_flag_underflow, fp_status);
191
+ }
192
+ }
193
+ /* OK, we're relatively canonical... now we need to round */
194
+ if (a.guard || a.round || a.sticky) {
195
+ float_raise(float_flag_inexact, fp_status);
196
+ switch (fp_status->float_rounding_mode) {
197
+ case float_round_to_zero:
198
+ /* Chop and we're done */
199
+ break;
200
+ case float_round_up:
201
+ if (a.sign == 0) {
202
+ a.mant = int128_add(a.mant, int128_one());
203
+ }
204
+ break;
205
+ case float_round_down:
206
+ if (a.sign != 0) {
207
+ a.mant = int128_add(a.mant, int128_one());
208
+ }
209
+ break;
210
+ default:
211
+ if (a.round || a.sticky) {
212
+ /* round up if guard is 1, down if guard is zero */
213
+ a.mant = int128_add(a.mant, int128_make64(a.guard));
214
+ } else if (a.guard) {
215
+ /* exactly .5, round up if odd */
216
+ a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
217
+ }
218
+ break;
219
+ }
220
+ }
221
+ /*
222
+ * OK, now we might have carried all the way up.
223
+ * So we might need to shr once
224
+ * at least we know that the lsb should be zero if we rounded and
225
+ * got a carry out...
226
+ */
227
+ if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
228
+ a = accum_norm_right(a, 1);
229
+ }
230
+ /* Overflow? */
231
+ if (a.exp >= DF_INF_EXP) {
232
+ /* Yep, inf result */
233
+ float_raise(float_flag_overflow, fp_status);
234
+ float_raise(float_flag_inexact, fp_status);
235
+ switch (fp_status->float_rounding_mode) {
236
+ case float_round_to_zero:
237
+ return maxfinite_float64(a.sign);
238
+ case float_round_up:
239
+ if (a.sign == 0) {
240
+ return infinite_float64(a.sign);
241
+ } else {
242
+ return maxfinite_float64(a.sign);
243
+ }
244
+ case float_round_down:
245
+ if (a.sign != 0) {
246
+ return infinite_float64(a.sign);
247
+ } else {
248
+ return maxfinite_float64(a.sign);
249
+ }
250
+ default:
251
+ return infinite_float64(a.sign);
252
+ }
253
+ }
254
+ /* Underflow? */
255
+ if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
256
+ /* Leading one means: No, we're normal. So, we should be done... */
257
+ Double ret;
258
+ ret.i = 0;
259
+ ret.sign = a.sign;
260
+ ret.exp = a.exp;
261
+ ret.mant = int128_getlo(a.mant);
262
+ return ret.i;
263
+ }
264
+ assert(a.exp == 1);
265
+ Double ret;
266
+ ret.i = 0;
267
+ ret.sign = a.sign;
268
+ ret.exp = 0;
269
+ ret.mant = int128_getlo(a.mant);
270
+ return ret.i;
44
}
271
}
45
272
46
-void translator_fake_ldb(DisasContextBase *db, vaddr pc, uint8_t insn8)
273
-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
47
+void translator_fake_ld(DisasContextBase *db, const void *data, size_t len)
274
-
48
{
275
float64 internal_mpyhh(float64 a, float64 b,
49
- assert(pc >= db->pc_first);
276
unsigned long long int accumulated,
50
db->fake_insn = true;
277
float_status *fp_status)
51
- record_save(db, pc, &insn8, sizeof(insn8));
52
+ record_save(db, db->pc_first, data, len);
53
}
54
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/target/s390x/tcg/translate.c
57
+++ b/target/s390x/tcg/translate.c
58
@@ -XXX,XX +XXX,XX @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
59
const DisasInsn *info;
60
61
if (unlikely(s->ex_value)) {
62
+ uint64_t be_insn;
63
+
64
/* Drop the EX data now, so that it's clear on exception paths. */
65
tcg_gen_st_i64(tcg_constant_i64(0), tcg_env,
66
offsetof(CPUS390XState, ex_value));
67
@@ -XXX,XX +XXX,XX @@ static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s)
68
ilen = s->ex_value & 0xf;
69
70
/* Register insn bytes with translator so plugins work. */
71
- for (int i = 0; i < ilen; i++) {
72
- uint8_t byte = extract64(insn, 56 - (i * 8), 8);
73
- translator_fake_ldb(&s->base, pc + i, byte);
74
- }
75
+ be_insn = cpu_to_be64(insn);
76
+ translator_fake_ld(&s->base, &be_insn, ilen);
77
op = insn >> 56;
78
} else {
79
insn = ld_code2(env, s, pc);
80
--
278
--
81
2.34.1
279
2.43.0
82
83
1
Remove left-over comment from commit dcd092a063
1
This structure, with bitfields, is incorrect for big-endian.
2
("accel/tcg: Improve can_do_io management").
2
Use the existing float32_getexp_raw which uses extract32.
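A quick note on why the union is not portable (illustration only, not part of the patch): C leaves bit-field allocation order to the ABI, and big-endian targets commonly allocate bit-fields starting from the most significant bit, so reinterpreting the raw float32 through the struct reads the wrong bits there. Extracting fields by bit position sidesteps that; a minimal sketch with the bitops helpers:

    /* Sketch: IEEE binary32 field positions, independent of host
     * endianness and bit-field layout. */
    uint32_t sign = extract32(f32, 31, 1);
    uint32_t exp  = extract32(f32, 23, 8);    /* biased exponent */
    uint32_t mant = extract32(f32, 0, 23);    /* fraction bits */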
3
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
include/exec/translator.h | 3 ++-
7
target/hexagon/fma_emu.c | 16 +++-------------
8
accel/tcg/translator.c | 2 ++
8
1 file changed, 3 insertions(+), 13 deletions(-)
9
2 files changed, 4 insertions(+), 1 deletion(-)
10
9
11
diff --git a/include/exec/translator.h b/include/exec/translator.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/include/exec/translator.h
12
--- a/target/hexagon/fma_emu.c
14
+++ b/include/exec/translator.h
13
+++ b/target/hexagon/fma_emu.c
15
@@ -XXX,XX +XXX,XX @@ typedef enum DisasJumpType {
14
@@ -XXX,XX +XXX,XX @@ typedef union {
16
* @num_insns: Number of translated instructions (including current).
15
};
17
* @max_insns: Maximum number of instructions to be translated in this TB.
16
} Double;
18
* @singlestep_enabled: "Hardware" single stepping enabled.
17
19
- * @saved_can_do_io: Known value of cpu->neg.can_do_io, or -1 for unknown.
18
-typedef union {
20
* @plugin_enabled: TCG plugin enabled in this TB.
19
- float f;
21
+ * @fake_insn: True if translator_fake_ldb used.
20
- uint32_t i;
22
* @insn_start: The last op emitted by the insn_start hook,
21
- struct {
23
* which is expected to be INDEX_op_insn_start.
22
- uint32_t mant:23;
24
*
23
- uint32_t exp:8;
25
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContextBase {
24
- uint32_t sign:1;
26
int max_insns;
25
- };
27
bool singlestep_enabled;
26
-} Float;
28
bool plugin_enabled;
27
-
29
+ bool fake_insn;
28
static uint64_t float64_getmant(float64 f64)
30
struct TCGOp *insn_start;
31
void *host_addr[2];
32
33
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/accel/tcg/translator.c
36
+++ b/accel/tcg/translator.c
37
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
38
db->max_insns = *max_insns;
39
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
40
db->insn_start = NULL;
41
+ db->fake_insn = false;
42
db->host_addr[0] = host_pc;
43
db->host_addr[1] = NULL;
44
db->record_start = 0;
45
@@ -XXX,XX +XXX,XX @@ uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc)
46
void translator_fake_ldb(DisasContextBase *db, vaddr pc, uint8_t insn8)
47
{
29
{
48
assert(pc >= db->pc_first);
30
Double a = { .i = f64 };
49
+ db->fake_insn = true;
31
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
50
record_save(db, pc, &insn8, sizeof(insn8));
32
51
plugin_insn_append(pc, &insn8, sizeof(insn8));
33
int32_t float32_getexp(float32 f32)
34
{
35
- Float a = { .i = f32 };
36
+ int exp = float32_getexp_raw(f32);
37
if (float32_is_normal(f32)) {
38
- return a.exp;
39
+ return exp;
40
}
41
if (float32_is_denormal(f32)) {
42
- return a.exp + 1;
43
+ return exp + 1;
44
}
45
return -1;
52
}
46
}
53
--
47
--
54
2.34.1
48
2.43.0
55
56
1
We have eliminated most uses of this hook. Reduce
1
This structure, with bitfields, is incorrect for big-endian.
2
further by allowing the hook to handle only the
2
Use extract64 and deposit64 instead.
3
special cases, returning false for normal processing.
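The same reasoning applies when assembling the float64 result (illustration only, not part of the patch): rather than writing through union bit-fields, the fields are deposited at explicit bit positions; "mant_bits", "exp_bits" and "sign_bit" below are placeholder names:

    /* Sketch: build an IEEE binary64 value field by field. */
    uint64_t ret = mant_bits;                  /* bits 0..51 */
    ret = deposit64(ret, 52, 11, exp_bits);    /* biased exponent */
    ret = deposit64(ret, 63, 1, sign_bit);     /* sign */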
4
3
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
include/exec/translator.h | 2 +-
7
target/hexagon/fma_emu.c | 46 ++++++++++++++--------------------------
9
accel/tcg/translator.c | 5 ++---
8
1 file changed, 16 insertions(+), 30 deletions(-)
10
target/hppa/translate.c | 15 ++++++---------
11
target/s390x/tcg/translate.c | 8 +++-----
12
4 files changed, 12 insertions(+), 18 deletions(-)
13
9
14
diff --git a/include/exec/translator.h b/include/exec/translator.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
15
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/translator.h
12
--- a/target/hexagon/fma_emu.c
17
+++ b/include/exec/translator.h
13
+++ b/target/hexagon/fma_emu.c
18
@@ -XXX,XX +XXX,XX @@ typedef struct TranslatorOps {
19
void (*insn_start)(DisasContextBase *db, CPUState *cpu);
20
void (*translate_insn)(DisasContextBase *db, CPUState *cpu);
21
void (*tb_stop)(DisasContextBase *db, CPUState *cpu);
22
- void (*disas_log)(const DisasContextBase *db, CPUState *cpu, FILE *f);
23
+ bool (*disas_log)(const DisasContextBase *db, CPUState *cpu, FILE *f);
24
} TranslatorOps;
25
26
/**
27
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/accel/tcg/translator.c
30
+++ b/accel/tcg/translator.c
31
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
32
if (logfile) {
33
fprintf(logfile, "----------------\n");
34
35
- if (ops->disas_log) {
36
- ops->disas_log(db, cpu, logfile);
37
- } else {
38
+ if (!ops->disas_log ||
39
+ !ops->disas_log(db, cpu, logfile)) {
40
fprintf(logfile, "IN: %s\n", lookup_symbol(db->pc_first));
41
target_disas(logfile, cpu, db->pc_first, db->tb->size);
42
}
43
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/hppa/translate.c
46
+++ b/target/hppa/translate.c
47
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@
48
15
49
#include "qemu/osdep.h"
16
#define WAY_BIG_EXP 4096
50
#include "cpu.h"
17
51
-#include "disas/disas.h"
18
-typedef union {
52
#include "qemu/host-utils.h"
19
- double f;
53
#include "exec/exec-all.h"
20
- uint64_t i;
54
#include "exec/page-protection.h"
21
- struct {
55
@@ -XXX,XX +XXX,XX @@ static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
22
- uint64_t mant:52;
23
- uint64_t exp:11;
24
- uint64_t sign:1;
25
- };
26
-} Double;
27
-
28
static uint64_t float64_getmant(float64 f64)
29
{
30
- Double a = { .i = f64 };
31
+ uint64_t mant = extract64(f64, 0, 52);
32
if (float64_is_normal(f64)) {
33
- return a.mant | 1ULL << 52;
34
+ return mant | 1ULL << 52;
35
}
36
if (float64_is_zero(f64)) {
37
return 0;
38
}
39
if (float64_is_denormal(f64)) {
40
- return a.mant;
41
+ return mant;
42
}
43
return ~0ULL;
56
}
44
}
57
45
58
#ifdef CONFIG_USER_ONLY
46
int32_t float64_getexp(float64 f64)
59
-static void hppa_tr_disas_log(const DisasContextBase *dcbase,
60
+static bool hppa_tr_disas_log(const DisasContextBase *dcbase,
61
CPUState *cs, FILE *logfile)
62
{
47
{
63
target_ulong pc = dcbase->pc_first;
48
- Double a = { .i = f64 };
64
@@ -XXX,XX +XXX,XX @@ static void hppa_tr_disas_log(const DisasContextBase *dcbase,
49
+ int exp = extract64(f64, 52, 11);
65
switch (pc) {
50
if (float64_is_normal(f64)) {
66
case 0x00:
51
- return a.exp;
67
fprintf(logfile, "IN:\n0x00000000: (null)\n");
52
+ return exp;
68
- return;
69
+ return true;
70
case 0xb0:
71
fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
72
- return;
73
+ return true;
74
case 0xe0:
75
fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
76
- return;
77
+ return true;
78
case 0x100:
79
fprintf(logfile, "IN:\n0x00000100: syscall\n");
80
- return;
81
+ return true;
82
}
53
}
83
-
54
if (float64_is_denormal(f64)) {
84
- fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
55
- return a.exp + 1;
85
- target_disas(logfile, cs, pc, dcbase->tb->size);
56
+ return exp + 1;
86
+ return false;
57
}
58
return -1;
87
}
59
}
88
#endif
60
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
89
61
/* Return a maximum finite value with the requested sign */
90
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
62
static float64 accum_round_float64(Accum a, float_status *fp_status)
91
index XXXXXXX..XXXXXXX 100644
63
{
92
--- a/target/s390x/tcg/translate.c
64
+ uint64_t ret;
93
+++ b/target/s390x/tcg/translate.c
65
+
94
@@ -XXX,XX +XXX,XX @@
66
if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
95
#include "qemu/osdep.h"
67
&& ((a.guard | a.round | a.sticky) == 0)) {
96
#include "cpu.h"
68
/* result zero */
97
#include "s390x-internal.h"
69
@@ -XXX,XX +XXX,XX @@ static float64 accum_round_float64(Accum a, float_status *fp_status)
98
-#include "disas/disas.h"
70
}
99
#include "exec/exec-all.h"
100
#include "tcg/tcg-op.h"
101
#include "tcg/tcg-op-gvec.h"
102
@@ -XXX,XX +XXX,XX @@ static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
103
}
71
}
72
/* Underflow? */
73
- if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
74
+ ret = int128_getlo(a.mant);
75
+ if (ret & (1ULL << DF_MANTBITS)) {
76
/* Leading one means: No, we're normal. So, we should be done... */
77
- Double ret;
78
- ret.i = 0;
79
- ret.sign = a.sign;
80
- ret.exp = a.exp;
81
- ret.mant = int128_getlo(a.mant);
82
- return ret.i;
83
+ ret = deposit64(ret, 52, 11, a.exp);
84
+ } else {
85
+ assert(a.exp == 1);
86
+ ret = deposit64(ret, 52, 11, 0);
87
}
88
- assert(a.exp == 1);
89
- Double ret;
90
- ret.i = 0;
91
- ret.sign = a.sign;
92
- ret.exp = 0;
93
- ret.mant = int128_getlo(a.mant);
94
- return ret.i;
95
+ ret = deposit64(ret, 63, 1, a.sign);
96
+ return ret;
104
}
97
}
105
98
106
-static void s390x_tr_disas_log(const DisasContextBase *dcbase,
99
float64 internal_mpyhh(float64 a, float64 b,
107
+static bool s390x_tr_disas_log(const DisasContextBase *dcbase,
108
CPUState *cs, FILE *logfile)
109
{
110
DisasContext *dc = container_of(dcbase, DisasContext, base);
111
@@ -XXX,XX +XXX,XX @@ static void s390x_tr_disas_log(const DisasContextBase *dcbase,
112
if (unlikely(dc->ex_value)) {
113
/* ??? Unfortunately target_disas can't use host memory. */
114
fprintf(logfile, "IN: EXECUTE %016" PRIx64, dc->ex_value);
115
- } else {
116
- fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
117
- target_disas(logfile, cs, dc->base.pc_first, dc->base.tb->size);
118
+ return true;
119
}
120
+ return false;
121
}
122
123
static const TranslatorOps s390x_tr_ops = {
124
--
100
--
125
2.34.1
101
2.43.0
126
127
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
1
No need to open-code 64x64->128-bit multiplication.
2
2
3
Previous commits replaced them with translator_ld* calls.
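For context (illustration only, not part of the patch): qemu/host-utils.h already provides a full 64x64 -> 128-bit unsigned multiply, so the hand-rolled partial-product arithmetic reduces to one call:

    /* Sketch: lo/hi receive the low and high halves of a * b. */
    uint64_t lo, hi;
    mulu64(&lo, &hi, a, b);
    Int128 prod = int128_make128(lo, hi);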
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
5
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Message-Id: <20240405131532.40913-1-philmd@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
5
---
9
include/exec/cpu_ldst.h | 10 ----------
6
target/hexagon/fma_emu.c | 32 +++-----------------------------
10
1 file changed, 10 deletions(-)
7
1 file changed, 3 insertions(+), 29 deletions(-)
11
8
12
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
9
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
13
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
14
--- a/include/exec/cpu_ldst.h
11
--- a/target/hexagon/fma_emu.c
15
+++ b/include/exec/cpu_ldst.h
12
+++ b/target/hexagon/fma_emu.c
16
@@ -XXX,XX +XXX,XX @@ uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr);
13
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32)
17
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr);
14
return -1;
18
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr);
15
}
19
16
20
-static inline int cpu_ldsb_code(CPUArchState *env, abi_ptr addr)
17
-static uint32_t int128_getw0(Int128 x)
21
-{
18
-{
22
- return (int8_t)cpu_ldub_code(env, addr);
19
- return int128_getlo(x);
23
-}
20
-}
24
-
21
-
25
-static inline int cpu_ldsw_code(CPUArchState *env, abi_ptr addr)
22
-static uint32_t int128_getw1(Int128 x)
26
-{
23
-{
27
- return (int16_t)cpu_lduw_code(env, addr);
24
- return int128_getlo(x) >> 32;
28
-}
25
-}
29
-
26
-
30
/**
27
static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
31
* tlb_vaddr_to_host:
28
{
32
* @env: CPUArchState
29
- Int128 a, b;
30
- uint64_t pp0, pp1a, pp1b, pp1s, pp2;
31
+ uint64_t l, h;
32
33
- a = int128_make64(ai);
34
- b = int128_make64(bi);
35
- pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
36
- pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
37
- pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
38
- pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
39
-
40
- pp1s = pp1a + pp1b;
41
- if ((pp1s < pp1a) || (pp1s < pp1b)) {
42
- pp2 += (1ULL << 32);
43
- }
44
- uint64_t ret_low = pp0 + (pp1s << 32);
45
- if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
46
- pp2 += 1;
47
- }
48
-
49
- return int128_make128(ret_low, pp2 + (pp1s >> 32));
50
+ mulu64(&l, &h, ai, bi);
51
+ return int128_make128(l, h);
52
}
53
54
static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
33
--
55
--
34
2.34.1
56
2.43.0
35
36
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
1
Initialize x with accumulated via direct assignment,
2
rather than multiplying by 1.
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
target/hexagon/translate.c | 3 +--
7
target/hexagon/fma_emu.c | 2 +-
5
1 file changed, 1 insertion(+), 2 deletions(-)
8
1 file changed, 1 insertion(+), 1 deletion(-)
6
9
7
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/target/hexagon/translate.c
12
--- a/target/hexagon/fma_emu.c
10
+++ b/target/hexagon/translate.c
13
+++ b/target/hexagon/fma_emu.c
11
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ float64 internal_mpyhh(float64 a, float64 b,
12
#include "exec/translation-block.h"
15
float64_is_infinity(b)) {
13
#include "exec/cpu_ldst.h"
16
return float64_mul(a, b, fp_status);
14
#include "exec/log.h"
15
-#include "exec/cpu_ldst.h"
16
#include "internal.h"
17
#include "attribs.h"
18
#include "insn.h"
19
@@ -XXX,XX +XXX,XX @@ static bool pkt_crosses_page(CPUHexagonState *env, DisasContext *ctx)
20
int nwords;
21
22
for (nwords = 0; !found_end && nwords < PACKET_WORDS_MAX; nwords++) {
23
- uint32_t word = cpu_ldl_code(env,
24
+ uint32_t word = translator_ldl(env, &ctx->base,
25
ctx->base.pc_next + nwords * sizeof(uint32_t));
26
found_end = is_packet_end(word);
27
}
17
}
18
- x.mant = int128_mul_6464(accumulated, 1);
19
+ x.mant = int128_make64(accumulated);
20
x.sticky = sticky;
21
prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
22
x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
28
--
23
--
29
2.34.1
24
2.43.0
30
31
1
Almost all of the disas_log implementations are identical.
1
Convert all targets simultaneously, as the gen_intermediate_code
2
Unify them within translator_loop.
2
function disappears from the target. While there are possible
3
3
workarounds, they're larger than simply performing the conversion.
4
Drop extra Priv/Virt logging from target/riscv.
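To sketch the shape of the per-target conversion (an illustration under this series' assumptions, not a real hunk): the old gen_intermediate_code entry point becomes a target-named function and is registered in the target's TCGCPUOps next to the existing initialize hook, e.g. for alpha:

    /* Sketch: wiring the renamed entry point into TCGCPUOps. */
    static const TCGCPUOps alpha_tcg_ops = {
        .initialize     = alpha_translate_init,
        .translate_code = alpha_translate_code,
        /* ... remaining hooks unchanged ... */
    };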
5
4
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
7
---
9

accel/tcg/translator.c | 9 ++++++++-
target/alpha/translate.c | 9 ---------
target/arm/tcg/translate-a64.c | 11 -----------
target/arm/tcg/translate.c | 12 ------------
target/avr/translate.c | 8 --------
target/cris/translate.c | 11 -----------
target/hexagon/translate.c | 9 ---------
target/hppa/translate.c | 6 ++++--
target/i386/tcg/translate.c | 11 -----------
target/loongarch/tcg/translate.c | 8 --------
target/m68k/translate.c | 9 ---------
target/microblaze/translate.c | 9 ---------
target/mips/tcg/translate.c | 9 ---------
target/openrisc/translate.c | 11 -----------
target/ppc/translate.c | 9 ---------
target/riscv/translate.c | 18 ------------------
target/rx/translate.c | 8 --------
target/sh4/translate.c | 9 ---------
target/sparc/translate.c | 9 ---------
target/tricore/translate.c | 9 ---------
target/xtensa/translate.c | 9 ---------
21 files changed, 12 insertions(+), 191 deletions(-)

include/exec/translator.h | 14 --------------
include/hw/core/tcg-cpu-ops.h | 13 +++++++++++++
target/alpha/cpu.h | 2 ++
target/arm/internals.h | 2 ++
target/avr/cpu.h | 2 ++
target/hexagon/cpu.h | 2 ++
target/hppa/cpu.h | 2 ++
target/i386/tcg/helper-tcg.h | 2 ++
target/loongarch/internals.h | 2 ++
target/m68k/cpu.h | 2 ++
target/microblaze/cpu.h | 2 ++
target/mips/tcg/tcg-internal.h | 2 ++
target/openrisc/cpu.h | 2 ++
target/ppc/cpu.h | 2 ++
target/riscv/cpu.h | 3 +++
target/rx/cpu.h | 2 ++
target/s390x/s390x-internal.h | 2 ++
target/sh4/cpu.h | 2 ++
target/sparc/cpu.h | 2 ++
target/tricore/cpu.h | 2 ++
target/xtensa/cpu.h | 2 ++
accel/tcg/cpu-exec.c | 8 +++++---
accel/tcg/translate-all.c | 8 +++++---
target/alpha/cpu.c | 1 +
target/alpha/translate.c | 4 ++--
target/arm/cpu.c | 1 +
target/arm/tcg/cpu-v7m.c | 1 +
target/arm/tcg/translate.c | 5 ++---
target/avr/cpu.c | 1 +
target/avr/translate.c | 6 +++---
target/hexagon/cpu.c | 1 +
target/hexagon/translate.c | 4 ++--
target/hppa/cpu.c | 1 +
target/hppa/translate.c | 4 ++--
target/i386/tcg/tcg-cpu.c | 1 +
target/i386/tcg/translate.c | 5 ++---
target/loongarch/cpu.c | 1 +
target/loongarch/tcg/translate.c | 4 ++--
target/m68k/cpu.c | 1 +
target/m68k/translate.c | 4 ++--
target/microblaze/cpu.c | 1 +
target/microblaze/translate.c | 4 ++--
target/mips/cpu.c | 1 +
target/mips/tcg/translate.c | 4 ++--
target/openrisc/cpu.c | 1 +
target/openrisc/translate.c | 4 ++--
target/ppc/cpu_init.c | 1 +
target/ppc/translate.c | 4 ++--
target/riscv/tcg/tcg-cpu.c | 1 +
target/riscv/translate.c | 4 ++--
target/rx/cpu.c | 1 +
target/rx/translate.c | 4 ++--
target/s390x/cpu.c | 1 +
target/s390x/tcg/translate.c | 4 ++--
target/sh4/cpu.c | 1 +
target/sh4/translate.c | 4 ++--
target/sparc/cpu.c | 1 +
target/sparc/translate.c | 4 ++--
target/tricore/cpu.c | 1 +
target/tricore/translate.c | 5 ++---
target/xtensa/cpu.c | 1 +
target/xtensa/translate.c | 4 ++--
62 files changed, 121 insertions(+), 62 deletions(-)
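
Before the per-file diffs, a minimal sketch of the core-side change,
condensed from the accel/tcg/translate-all.c and accel/tcg/cpu-exec.c
hunks below (illustrative, not the literal patch text):

    /* accel/tcg/translate-all.c, setjmp_gen_code(): call the per-target hook */
    CPUState *cs = env_cpu(env);
    tcg_ctx->cpu = cs;
    cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);

    /* accel/tcg/cpu-exec.c, tcg_exec_realizefn(): the hook is now mandatory */
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
    assert(tcg_ops->translate_code);
    tcg_ops->initialize();
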
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
72
diff --git a/include/exec/translator.h b/include/exec/translator.h
33
index XXXXXXX..XXXXXXX 100644
73
index XXXXXXX..XXXXXXX 100644
34
--- a/accel/tcg/translator.c
74
--- a/include/exec/translator.h
35
+++ b/accel/tcg/translator.c
75
+++ b/include/exec/translator.h
36
@@ -XXX,XX +XXX,XX @@
76
@@ -XXX,XX +XXX,XX @@
37
#include "exec/cpu_ldst.h"
77
#include "qemu/bswap.h"
38
#include "tcg/tcg-op-common.h"
78
#include "exec/vaddr.h"
39
#include "internal-target.h"
79
40
+#include "disas/disas.h"
80
-/**
41
81
- * gen_intermediate_code
42
static void set_can_do_io(DisasContextBase *db, bool val)
82
- * @cpu: cpu context
43
{
83
- * @tb: translation block
44
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
84
- * @max_insns: max number of instructions to translate
45
FILE *logfile = qemu_log_trylock();
85
- * @pc: guest virtual program counter address
46
if (logfile) {
86
- * @host_pc: host physical program counter address
47
fprintf(logfile, "----------------\n");
87
- *
48
- ops->disas_log(db, cpu, logfile);
88
- * This function must be provided by the target, which should create
89
- * the target-specific DisasContext, and then invoke translator_loop.
90
- */
91
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
92
- vaddr pc, void *host_pc);
93
-
94
/**
95
* DisasJumpType:
96
* @DISAS_NEXT: Next instruction in program order.
97
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
98
index XXXXXXX..XXXXXXX 100644
99
--- a/include/hw/core/tcg-cpu-ops.h
100
+++ b/include/hw/core/tcg-cpu-ops.h
101
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
102
* Called when the first CPU is realized.
103
*/
104
void (*initialize)(void);
105
+ /**
106
+ * @translate_code: Translate guest instructions to TCGOps
107
+ * @cpu: cpu context
108
+ * @tb: translation block
109
+ * @max_insns: max number of instructions to translate
110
+ * @pc: guest virtual program counter address
111
+ * @host_pc: host physical program counter address
112
+ *
113
+ * This function must be provided by the target, which should create
114
+ * the target-specific DisasContext, and then invoke translator_loop.
115
+ */
116
+ void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
117
+ int *max_insns, vaddr pc, void *host_pc);
118
/**
119
* @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
120
*
121
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
122
index XXXXXXX..XXXXXXX 100644
123
--- a/target/alpha/cpu.h
124
+++ b/target/alpha/cpu.h
125
@@ -XXX,XX +XXX,XX @@ enum {
126
};
127
128
void alpha_translate_init(void);
129
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
130
+ int *max_insns, vaddr pc, void *host_pc);
131
132
#define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
133
134
diff --git a/target/arm/internals.h b/target/arm/internals.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/target/arm/internals.h
137
+++ b/target/arm/internals.h
138
@@ -XXX,XX +XXX,XX @@ void init_cpreg_list(ARMCPU *cpu);
139
140
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
141
void arm_translate_init(void);
142
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
143
+ int *max_insns, vaddr pc, void *host_pc);
144
145
void arm_cpu_register_gdb_commands(ARMCPU *cpu);
146
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
147
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
148
index XXXXXXX..XXXXXXX 100644
149
--- a/target/avr/cpu.h
150
+++ b/target/avr/cpu.h
151
@@ -XXX,XX +XXX,XX @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
152
}
153
154
void avr_cpu_tcg_init(void);
155
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
156
+ int *max_insns, vaddr pc, void *host_pc);
157
158
int cpu_avr_exec(CPUState *cpu);
159
160
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
161
index XXXXXXX..XXXXXXX 100644
162
--- a/target/hexagon/cpu.h
163
+++ b/target/hexagon/cpu.h
164
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
165
typedef HexagonCPU ArchCPU;
166
167
void hexagon_translate_init(void);
168
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
169
+ int *max_insns, vaddr pc, void *host_pc);
170
171
#include "exec/cpu-all.h"
172
173
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/hppa/cpu.h
176
+++ b/target/hppa/cpu.h
177
@@ -XXX,XX +XXX,XX @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
178
}
179
180
void hppa_translate_init(void);
181
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
182
+ int *max_insns, vaddr pc, void *host_pc);
183
184
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
185
186
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
187
index XXXXXXX..XXXXXXX 100644
188
--- a/target/i386/tcg/helper-tcg.h
189
+++ b/target/i386/tcg/helper-tcg.h
190
@@ -XXX,XX +XXX,XX @@ static inline target_long lshift(target_long x, int n)
191
192
/* translate.c */
193
void tcg_x86_init(void);
194
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
195
+ int *max_insns, vaddr pc, void *host_pc);
196
197
/* excp_helper.c */
198
G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
199
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
200
index XXXXXXX..XXXXXXX 100644
201
--- a/target/loongarch/internals.h
202
+++ b/target/loongarch/internals.h
203
@@ -XXX,XX +XXX,XX @@
204
#define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)
205
206
void loongarch_translate_init(void);
207
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
208
+ int *max_insns, vaddr pc, void *host_pc);
209
210
void G_NORETURN do_raise_exception(CPULoongArchState *env,
211
uint32_t exception,
212
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
213
index XXXXXXX..XXXXXXX 100644
214
--- a/target/m68k/cpu.h
215
+++ b/target/m68k/cpu.h
216
@@ -XXX,XX +XXX,XX @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
217
int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
218
219
void m68k_tcg_init(void);
220
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
221
+ int *max_insns, vaddr pc, void *host_pc);
222
void m68k_cpu_init_gdb(M68kCPU *cpu);
223
uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
224
void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
225
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
226
index XXXXXXX..XXXXXXX 100644
227
--- a/target/microblaze/cpu.h
228
+++ b/target/microblaze/cpu.h
229
@@ -XXX,XX +XXX,XX @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
230
}
231
232
void mb_tcg_init(void);
233
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
234
+ int *max_insns, vaddr pc, void *host_pc);
235
236
#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU
237
238
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
239
index XXXXXXX..XXXXXXX 100644
240
--- a/target/mips/tcg/tcg-internal.h
241
+++ b/target/mips/tcg/tcg-internal.h
242
@@ -XXX,XX +XXX,XX @@
243
#include "cpu.h"
244
245
void mips_tcg_init(void);
246
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
247
+ int *max_insns, vaddr pc, void *host_pc);
248
249
void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
250
G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
251
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
252
index XXXXXXX..XXXXXXX 100644
253
--- a/target/openrisc/cpu.h
254
+++ b/target/openrisc/cpu.h
255
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
256
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
257
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
258
void openrisc_translate_init(void);
259
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
260
+ int *max_insns, vaddr pc, void *host_pc);
261
int print_insn_or1k(bfd_vma addr, disassemble_info *info);
262
263
#ifndef CONFIG_USER_ONLY
264
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
265
index XXXXXXX..XXXXXXX 100644
266
--- a/target/ppc/cpu.h
267
+++ b/target/ppc/cpu.h
268
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ppc_cpu;
269
270
/*****************************************************************************/
271
void ppc_translate_init(void);
272
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
273
+ int *max_insns, vaddr pc, void *host_pc);
274
275
#if !defined(CONFIG_USER_ONLY)
276
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
277
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
278
index XXXXXXX..XXXXXXX 100644
279
--- a/target/riscv/cpu.h
280
+++ b/target/riscv/cpu.h
281
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
282
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
283
284
void riscv_translate_init(void);
285
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
286
+ int *max_insns, vaddr pc, void *host_pc);
49
+
287
+
50
+ if (ops->disas_log) {
288
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
51
+ ops->disas_log(db, cpu, logfile);
289
uint32_t exception, uintptr_t pc);
52
+ } else {
290
53
+ fprintf(logfile, "IN: %s\n", lookup_symbol(db->pc_first));
291
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
54
+ target_disas(logfile, cpu, db->pc_first, db->tb->size);
292
index XXXXXXX..XXXXXXX 100644
55
+ }
293
--- a/target/rx/cpu.h
56
fprintf(logfile, "\n");
294
+++ b/target/rx/cpu.h
57
qemu_log_unlock(logfile);
295
@@ -XXX,XX +XXX,XX @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
58
}
296
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
297
298
void rx_translate_init(void);
299
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
300
+ int *max_insns, vaddr pc, void *host_pc);
301
void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
302
303
#include "exec/cpu-all.h"
304
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
305
index XXXXXXX..XXXXXXX 100644
306
--- a/target/s390x/s390x-internal.h
307
+++ b/target/s390x/s390x-internal.h
308
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
309
310
/* translate.c */
311
void s390x_translate_init(void);
312
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
313
+ int *max_insns, vaddr pc, void *host_pc);
314
void s390x_restore_state_to_opc(CPUState *cs,
315
const TranslationBlock *tb,
316
const uint64_t *data);
317
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
318
index XXXXXXX..XXXXXXX 100644
319
--- a/target/sh4/cpu.h
320
+++ b/target/sh4/cpu.h
321
@@ -XXX,XX +XXX,XX @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
322
uintptr_t retaddr);
323
324
void sh4_translate_init(void);
325
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
326
+ int *max_insns, vaddr pc, void *host_pc);
327
328
#if !defined(CONFIG_USER_ONLY)
329
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
330
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
331
index XXXXXXX..XXXXXXX 100644
332
--- a/target/sparc/cpu.h
333
+++ b/target/sparc/cpu.h
334
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
335
336
/* translate.c */
337
void sparc_tcg_init(void);
338
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
339
+ int *max_insns, vaddr pc, void *host_pc);
340
341
/* fop_helper.c */
342
target_ulong cpu_get_fsr(CPUSPARCState *);
343
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
344
index XXXXXXX..XXXXXXX 100644
345
--- a/target/tricore/cpu.h
346
+++ b/target/tricore/cpu.h
347
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)
348
349
void cpu_state_reset(CPUTriCoreState *s);
350
void tricore_tcg_init(void);
351
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
352
+ int *max_insns, vaddr pc, void *host_pc);
353
354
static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
355
uint64_t *cs_base, uint32_t *flags)
356
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
357
index XXXXXXX..XXXXXXX 100644
358
--- a/target/xtensa/cpu.h
359
+++ b/target/xtensa/cpu.h
360
@@ -XXX,XX +XXX,XX @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
361
362
void xtensa_collect_sr_names(const XtensaConfig *config);
363
void xtensa_translate_init(void);
364
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
365
+ int *max_insns, vaddr pc, void *host_pc);
366
void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
367
void xtensa_breakpoint_handler(CPUState *cs);
368
void xtensa_register_core(XtensaConfigList *node);
369
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
370
index XXXXXXX..XXXXXXX 100644
371
--- a/accel/tcg/cpu-exec.c
372
+++ b/accel/tcg/cpu-exec.c
373
@@ -XXX,XX +XXX,XX @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
374
375
if (!tcg_target_initialized) {
376
/* Check mandatory TCGCPUOps handlers */
377
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
378
#ifndef CONFIG_USER_ONLY
379
- assert(cpu->cc->tcg_ops->cpu_exec_halt);
380
- assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
381
+ assert(tcg_ops->cpu_exec_halt);
382
+ assert(tcg_ops->cpu_exec_interrupt);
383
#endif /* !CONFIG_USER_ONLY */
384
- cpu->cc->tcg_ops->initialize();
385
+ assert(tcg_ops->translate_code);
386
+ tcg_ops->initialize();
387
tcg_target_initialized = true;
388
}
389
390
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
391
index XXXXXXX..XXXXXXX 100644
392
--- a/accel/tcg/translate-all.c
393
+++ b/accel/tcg/translate-all.c
394
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
395
396
tcg_func_start(tcg_ctx);
397
398
- tcg_ctx->cpu = env_cpu(env);
399
- gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
400
+ CPUState *cs = env_cpu(env);
401
+ tcg_ctx->cpu = cs;
402
+ cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);
403
+
404
assert(tb->size != 0);
405
tcg_ctx->cpu = NULL;
406
*max_insns = tb->icount;
407
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
408
/*
409
* Overflow of code_gen_buffer, or the current slice of it.
410
*
411
- * TODO: We don't need to re-do gen_intermediate_code, nor
412
+ * TODO: We don't need to re-do tcg_ops->translate_code, nor
413
* should we re-do the tcg optimization currently hidden
414
* inside tcg_gen_code. All that should be required is to
415
* flush the TBs, allocate a new TB, re-initialize it per
416
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
417
index XXXXXXX..XXXXXXX 100644
418
--- a/target/alpha/cpu.c
419
+++ b/target/alpha/cpu.c
420
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
421
422
static const TCGCPUOps alpha_tcg_ops = {
423
.initialize = alpha_translate_init,
424
+ .translate_code = alpha_translate_code,
425
.synchronize_from_tb = alpha_cpu_synchronize_from_tb,
426
.restore_state_to_opc = alpha_restore_state_to_opc,
427
59
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
428
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
60
index XXXXXXX..XXXXXXX 100644
429
index XXXXXXX..XXXXXXX 100644
61
--- a/target/alpha/translate.c
430
--- a/target/alpha/translate.c
62
+++ b/target/alpha/translate.c
431
+++ b/target/alpha/translate.c
63
@@ -XXX,XX +XXX,XX @@
432
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
64
#include "qemu/osdep.h"
65
#include "cpu.h"
66
#include "sysemu/cpus.h"
67
-#include "disas/disas.h"
68
#include "qemu/host-utils.h"
69
#include "exec/exec-all.h"
70
#include "tcg/tcg-op.h"
71
@@ -XXX,XX +XXX,XX @@ static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
72
}
73
}
74
75
-static void alpha_tr_disas_log(const DisasContextBase *dcbase,
76
- CPUState *cpu, FILE *logfile)
77
-{
78
- fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
79
- target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
80
-}
81
-
82
static const TranslatorOps alpha_tr_ops = {
83
.init_disas_context = alpha_tr_init_disas_context,
84
.tb_start = alpha_tr_tb_start,
85
.insn_start = alpha_tr_insn_start,
86
.translate_insn = alpha_tr_translate_insn,
87
.tb_stop = alpha_tr_tb_stop,
433
.tb_stop = alpha_tr_tb_stop,
88
- .disas_log = alpha_tr_disas_log,
434
};
89
};
435
90
436
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
91
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
437
- vaddr pc, void *host_pc)
92
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
438
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
93
index XXXXXXX..XXXXXXX 100644
439
+ int *max_insns, vaddr pc, void *host_pc)
94
--- a/target/arm/tcg/translate-a64.c
440
{
95
+++ b/target/arm/tcg/translate-a64.c
441
DisasContext dc;
96
@@ -XXX,XX +XXX,XX @@
442
translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
97
#include "translate.h"
443
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
98
#include "translate-a64.h"
444
index XXXXXXX..XXXXXXX 100644
99
#include "qemu/log.h"
445
--- a/target/arm/cpu.c
100
-#include "disas/disas.h"
446
+++ b/target/arm/cpu.c
101
#include "arm_ldst.h"
447
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps arm_sysemu_ops = {
102
#include "semihosting/semihost.h"
448
#ifdef CONFIG_TCG
103
#include "cpregs.h"
449
static const TCGCPUOps arm_tcg_ops = {
104
@@ -XXX,XX +XXX,XX @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
450
.initialize = arm_translate_init,
105
}
451
+ .translate_code = arm_translate_code,
106
}
452
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
107
453
.debug_excp_handler = arm_debug_excp_handler,
108
-static void aarch64_tr_disas_log(const DisasContextBase *dcbase,
454
.restore_state_to_opc = arm_restore_state_to_opc,
109
- CPUState *cpu, FILE *logfile)
455
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
110
-{
456
index XXXXXXX..XXXXXXX 100644
111
- DisasContext *dc = container_of(dcbase, DisasContext, base);
457
--- a/target/arm/tcg/cpu-v7m.c
112
-
458
+++ b/target/arm/tcg/cpu-v7m.c
113
- fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
459
@@ -XXX,XX +XXX,XX @@ static void cortex_m55_initfn(Object *obj)
114
- target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
460
115
-}
461
static const TCGCPUOps arm_v7m_tcg_ops = {
116
-
462
.initialize = arm_translate_init,
117
const TranslatorOps aarch64_translator_ops = {
463
+ .translate_code = arm_translate_code,
118
.init_disas_context = aarch64_tr_init_disas_context,
464
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
119
.tb_start = aarch64_tr_tb_start,
465
.debug_excp_handler = arm_debug_excp_handler,
120
.insn_start = aarch64_tr_insn_start,
466
.restore_state_to_opc = arm_restore_state_to_opc,
121
.translate_insn = aarch64_tr_translate_insn,
122
.tb_stop = aarch64_tr_tb_stop,
123
- .disas_log = aarch64_tr_disas_log,
124
};
125
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
467
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
126
index XXXXXXX..XXXXXXX 100644
468
index XXXXXXX..XXXXXXX 100644
127
--- a/target/arm/tcg/translate.c
469
--- a/target/arm/tcg/translate.c
128
+++ b/target/arm/tcg/translate.c
470
+++ b/target/arm/tcg/translate.c
129
@@ -XXX,XX +XXX,XX @@
471
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
130
#include "translate.h"
131
#include "translate-a32.h"
132
#include "qemu/log.h"
133
-#include "disas/disas.h"
134
#include "arm_ldst.h"
135
#include "semihosting/semihost.h"
136
#include "cpregs.h"
137
@@ -XXX,XX +XXX,XX @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
138
}
139
}
140
141
-static void arm_tr_disas_log(const DisasContextBase *dcbase,
142
- CPUState *cpu, FILE *logfile)
143
-{
144
- DisasContext *dc = container_of(dcbase, DisasContext, base);
145
-
146
- fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
147
- target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
148
-}
149
-
150
static const TranslatorOps arm_translator_ops = {
151
.init_disas_context = arm_tr_init_disas_context,
152
.tb_start = arm_tr_tb_start,
153
.insn_start = arm_tr_insn_start,
154
.translate_insn = arm_tr_translate_insn,
155
.tb_stop = arm_tr_tb_stop,
472
.tb_stop = arm_tr_tb_stop,
156
- .disas_log = arm_tr_disas_log,
473
};
157
};
474
158
475
-/* generate intermediate code for basic block 'tb'. */
159
static const TranslatorOps thumb_translator_ops = {
476
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
160
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
477
- vaddr pc, void *host_pc)
161
.insn_start = arm_tr_insn_start,
478
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
162
.translate_insn = thumb_tr_translate_insn,
479
+ int *max_insns, vaddr pc, void *host_pc)
163
.tb_stop = arm_tr_tb_stop,
480
{
164
- .disas_log = arm_tr_disas_log,
481
DisasContext dc = { };
165
};
482
const TranslatorOps *ops = &arm_translator_ops;
166
483
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
167
/* generate intermediate code for basic block 'tb'. */
484
index XXXXXXX..XXXXXXX 100644
485
--- a/target/avr/cpu.c
486
+++ b/target/avr/cpu.c
487
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {
488
489
static const TCGCPUOps avr_tcg_ops = {
490
.initialize = avr_cpu_tcg_init,
491
+ .translate_code = avr_cpu_translate_code,
492
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
493
.restore_state_to_opc = avr_restore_state_to_opc,
494
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
168
diff --git a/target/avr/translate.c b/target/avr/translate.c
495
diff --git a/target/avr/translate.c b/target/avr/translate.c
169
index XXXXXXX..XXXXXXX 100644
496
index XXXXXXX..XXXXXXX 100644
170
--- a/target/avr/translate.c
497
--- a/target/avr/translate.c
171
+++ b/target/avr/translate.c
498
+++ b/target/avr/translate.c
172
@@ -XXX,XX +XXX,XX @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
499
@@ -XXX,XX +XXX,XX @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
173
}
500
*
174
}
501
* - translate()
175
502
* - canonicalize_skip()
176
-static void avr_tr_disas_log(const DisasContextBase *dcbase,
503
- * - gen_intermediate_code()
177
- CPUState *cs, FILE *logfile)
504
+ * - translate_code()
178
-{
505
* - restore_state_to_opc()
179
- fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
506
*
180
- target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
507
*/
181
-}
508
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
182
-
183
static const TranslatorOps avr_tr_ops = {
184
.init_disas_context = avr_tr_init_disas_context,
185
.tb_start = avr_tr_tb_start,
186
.insn_start = avr_tr_insn_start,
187
.translate_insn = avr_tr_translate_insn,
188
.tb_stop = avr_tr_tb_stop,
509
.tb_stop = avr_tr_tb_stop,
189
- .disas_log = avr_tr_disas_log,
510
};
190
};
511
191
512
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
192
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
513
- vaddr pc, void *host_pc)
193
diff --git a/target/cris/translate.c b/target/cris/translate.c
514
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
194
index XXXXXXX..XXXXXXX 100644
515
+ int *max_insns, vaddr pc, void *host_pc)
195
--- a/target/cris/translate.c
516
{
196
+++ b/target/cris/translate.c
517
DisasContext dc = { };
197
@@ -XXX,XX +XXX,XX @@
518
translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
198
519
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
199
#include "qemu/osdep.h"
520
index XXXXXXX..XXXXXXX 100644
200
#include "cpu.h"
521
--- a/target/hexagon/cpu.c
201
-#include "disas/disas.h"
522
+++ b/target/hexagon/cpu.c
202
#include "exec/exec-all.h"
523
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)
203
#include "tcg/tcg-op.h"
524
204
#include "exec/helper-proto.h"
525
static const TCGCPUOps hexagon_tcg_ops = {
205
@@ -XXX,XX +XXX,XX @@ static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
526
.initialize = hexagon_translate_init,
206
}
527
+ .translate_code = hexagon_translate_code,
207
}
528
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
208
529
.restore_state_to_opc = hexagon_restore_state_to_opc,
209
-static void cris_tr_disas_log(const DisasContextBase *dcbase,
530
};
210
- CPUState *cpu, FILE *logfile)
211
-{
212
- if (!DISAS_CRIS) {
213
- fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
214
- target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
215
- }
216
-}
217
-
218
static const TranslatorOps cris_tr_ops = {
219
.init_disas_context = cris_tr_init_disas_context,
220
.tb_start = cris_tr_tb_start,
221
.insn_start = cris_tr_insn_start,
222
.translate_insn = cris_tr_translate_insn,
223
.tb_stop = cris_tr_tb_stop,
224
- .disas_log = cris_tr_disas_log,
225
};
226
227
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
228
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
531
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
229
index XXXXXXX..XXXXXXX 100644
532
index XXXXXXX..XXXXXXX 100644
230
--- a/target/hexagon/translate.c
533
--- a/target/hexagon/translate.c
231
+++ b/target/hexagon/translate.c
534
+++ b/target/hexagon/translate.c
232
@@ -XXX,XX +XXX,XX @@ static void hexagon_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
535
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
233
}
234
}
235
236
-static void hexagon_tr_disas_log(const DisasContextBase *dcbase,
237
- CPUState *cpu, FILE *logfile)
238
-{
239
- fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
240
- target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
241
-}
242
-
243
-
244
static const TranslatorOps hexagon_tr_ops = {
245
.init_disas_context = hexagon_tr_init_disas_context,
246
.tb_start = hexagon_tr_tb_start,
247
.insn_start = hexagon_tr_insn_start,
248
.translate_insn = hexagon_tr_translate_packet,
249
.tb_stop = hexagon_tr_tb_stop,
536
.tb_stop = hexagon_tr_tb_stop,
250
- .disas_log = hexagon_tr_disas_log,
537
};
251
};
538
252
539
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
253
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
540
- vaddr pc, void *host_pc)
541
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
542
+ int *max_insns, vaddr pc, void *host_pc)
543
{
544
DisasContext ctx;
545
546
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
547
index XXXXXXX..XXXXXXX 100644
548
--- a/target/hppa/cpu.c
549
+++ b/target/hppa/cpu.c
550
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
551
552
static const TCGCPUOps hppa_tcg_ops = {
553
.initialize = hppa_translate_init,
554
+ .translate_code = hppa_translate_code,
555
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
556
.restore_state_to_opc = hppa_restore_state_to_opc,
557
254
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
558
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
255
index XXXXXXX..XXXXXXX 100644
559
index XXXXXXX..XXXXXXX 100644
256
--- a/target/hppa/translate.c
560
--- a/target/hppa/translate.c
257
+++ b/target/hppa/translate.c
561
+++ b/target/hppa/translate.c
258
@@ -XXX,XX +XXX,XX @@ static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
259
}
260
}
261
262
+#ifdef CONFIG_USER_ONLY
263
static void hppa_tr_disas_log(const DisasContextBase *dcbase,
264
CPUState *cs, FILE *logfile)
265
{
266
target_ulong pc = dcbase->pc_first;
267
268
-#ifdef CONFIG_USER_ONLY
269
switch (pc) {
270
case 0x00:
271
fprintf(logfile, "IN:\n0x00000000: (null)\n");
272
@@ -XXX,XX +XXX,XX @@ static void hppa_tr_disas_log(const DisasContextBase *dcbase,
273
fprintf(logfile, "IN:\n0x00000100: syscall\n");
274
return;
275
}
276
-#endif
277
278
fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
279
target_disas(logfile, cs, pc, dcbase->tb->size);
280
}
281
+#endif
282
283
static const TranslatorOps hppa_tr_ops = {
284
.init_disas_context = hppa_tr_init_disas_context,
285
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
562
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
286
.insn_start = hppa_tr_insn_start,
563
#endif
287
.translate_insn = hppa_tr_translate_insn,
564
};
288
.tb_stop = hppa_tr_tb_stop,
565
289
+#ifdef CONFIG_USER_ONLY
566
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
290
.disas_log = hppa_tr_disas_log,
567
- vaddr pc, void *host_pc)
291
+#endif
568
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
292
};
569
+ int *max_insns, vaddr pc, void *host_pc)
293
570
{
294
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
571
DisasContext ctx = { };
572
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
573
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
574
index XXXXXXX..XXXXXXX 100644
575
--- a/target/i386/tcg/tcg-cpu.c
576
+++ b/target/i386/tcg/tcg-cpu.c
577
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)
578
579
static const TCGCPUOps x86_tcg_ops = {
580
.initialize = tcg_x86_init,
581
+ .translate_code = x86_translate_code,
582
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
583
.restore_state_to_opc = x86_restore_state_to_opc,
584
.cpu_exec_enter = x86_cpu_exec_enter,
295
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
585
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
296
index XXXXXXX..XXXXXXX 100644
586
index XXXXXXX..XXXXXXX 100644
297
--- a/target/i386/tcg/translate.c
587
--- a/target/i386/tcg/translate.c
298
+++ b/target/i386/tcg/translate.c
588
+++ b/target/i386/tcg/translate.c
299
@@ -XXX,XX +XXX,XX @@
589
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
300
301
#include "qemu/host-utils.h"
302
#include "cpu.h"
303
-#include "disas/disas.h"
304
#include "exec/exec-all.h"
305
#include "tcg/tcg-op.h"
306
#include "tcg/tcg-op-gvec.h"
307
@@ -XXX,XX +XXX,XX @@ static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
308
}
309
}
310
311
-static void i386_tr_disas_log(const DisasContextBase *dcbase,
312
- CPUState *cpu, FILE *logfile)
313
-{
314
- DisasContext *dc = container_of(dcbase, DisasContext, base);
315
-
316
- fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
317
- target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
318
-}
319
-
320
static const TranslatorOps i386_tr_ops = {
321
.init_disas_context = i386_tr_init_disas_context,
322
.tb_start = i386_tr_tb_start,
323
.insn_start = i386_tr_insn_start,
324
.translate_insn = i386_tr_translate_insn,
325
.tb_stop = i386_tr_tb_stop,
590
.tb_stop = i386_tr_tb_stop,
326
- .disas_log = i386_tr_disas_log,
591
};
327
};
592
328
593
-/* generate intermediate code for basic block 'tb'. */
329
/* generate intermediate code for basic block 'tb'. */
594
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
595
- vaddr pc, void *host_pc)
596
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
597
+ int *max_insns, vaddr pc, void *host_pc)
598
{
599
DisasContext dc;
600
601
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
602
index XXXXXXX..XXXXXXX 100644
603
--- a/target/loongarch/cpu.c
604
+++ b/target/loongarch/cpu.c
605
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
606
607
static const TCGCPUOps loongarch_tcg_ops = {
608
.initialize = loongarch_translate_init,
609
+ .translate_code = loongarch_translate_code,
610
.synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
611
.restore_state_to_opc = loongarch_restore_state_to_opc,
612
330
diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
613
diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
331
index XXXXXXX..XXXXXXX 100644
614
index XXXXXXX..XXXXXXX 100644
332
--- a/target/loongarch/tcg/translate.c
615
--- a/target/loongarch/tcg/translate.c
333
+++ b/target/loongarch/tcg/translate.c
616
+++ b/target/loongarch/tcg/translate.c
334
@@ -XXX,XX +XXX,XX @@ static void loongarch_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
617
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
335
}
336
}
337
338
-static void loongarch_tr_disas_log(const DisasContextBase *dcbase,
339
- CPUState *cpu, FILE *logfile)
340
-{
341
- qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
342
- target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
343
-}
344
-
345
static const TranslatorOps loongarch_tr_ops = {
346
.init_disas_context = loongarch_tr_init_disas_context,
347
.tb_start = loongarch_tr_tb_start,
348
.insn_start = loongarch_tr_insn_start,
349
.translate_insn = loongarch_tr_translate_insn,
350
.tb_stop = loongarch_tr_tb_stop,
618
.tb_stop = loongarch_tr_tb_stop,
351
- .disas_log = loongarch_tr_disas_log,
619
};
352
};
620
353
621
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
354
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
622
- vaddr pc, void *host_pc)
623
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
624
+ int *max_insns, vaddr pc, void *host_pc)
625
{
626
DisasContext ctx;
627
628
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
629
index XXXXXXX..XXXXXXX 100644
630
--- a/target/m68k/cpu.c
631
+++ b/target/m68k/cpu.c
632
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {
633
634
static const TCGCPUOps m68k_tcg_ops = {
635
.initialize = m68k_tcg_init,
636
+ .translate_code = m68k_translate_code,
637
.restore_state_to_opc = m68k_restore_state_to_opc,
638
639
#ifndef CONFIG_USER_ONLY
355
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
640
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
356
index XXXXXXX..XXXXXXX 100644
641
index XXXXXXX..XXXXXXX 100644
357
--- a/target/m68k/translate.c
642
--- a/target/m68k/translate.c
358
+++ b/target/m68k/translate.c
643
+++ b/target/m68k/translate.c
359
@@ -XXX,XX +XXX,XX @@
644
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
360
361
#include "qemu/osdep.h"
362
#include "cpu.h"
363
-#include "disas/disas.h"
364
#include "exec/exec-all.h"
365
#include "tcg/tcg-op.h"
366
#include "qemu/log.h"
367
@@ -XXX,XX +XXX,XX @@ static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
368
}
369
}
370
371
-static void m68k_tr_disas_log(const DisasContextBase *dcbase,
372
- CPUState *cpu, FILE *logfile)
373
-{
374
- fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
375
- target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
376
-}
377
-
378
static const TranslatorOps m68k_tr_ops = {
379
.init_disas_context = m68k_tr_init_disas_context,
380
.tb_start = m68k_tr_tb_start,
381
.insn_start = m68k_tr_insn_start,
382
.translate_insn = m68k_tr_translate_insn,
383
.tb_stop = m68k_tr_tb_stop,
645
.tb_stop = m68k_tr_tb_stop,
384
- .disas_log = m68k_tr_disas_log,
646
};
385
};
647
386
648
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
387
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
649
- vaddr pc, void *host_pc)
650
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
651
+ int *max_insns, vaddr pc, void *host_pc)
652
{
653
DisasContext dc;
654
translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
655
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
656
index XXXXXXX..XXXXXXX 100644
657
--- a/target/microblaze/cpu.c
658
+++ b/target/microblaze/cpu.c
659
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {
660
661
static const TCGCPUOps mb_tcg_ops = {
662
.initialize = mb_tcg_init,
663
+ .translate_code = mb_translate_code,
664
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
665
.restore_state_to_opc = mb_restore_state_to_opc,
666
388
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
667
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
389
index XXXXXXX..XXXXXXX 100644
668
index XXXXXXX..XXXXXXX 100644
390
--- a/target/microblaze/translate.c
669
--- a/target/microblaze/translate.c
391
+++ b/target/microblaze/translate.c
670
+++ b/target/microblaze/translate.c
392
@@ -XXX,XX +XXX,XX @@
671
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
393
394
#include "qemu/osdep.h"
395
#include "cpu.h"
396
-#include "disas/disas.h"
397
#include "exec/exec-all.h"
398
#include "exec/cpu_ldst.h"
399
#include "tcg/tcg-op.h"
400
@@ -XXX,XX +XXX,XX @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs)
401
}
402
}
403
404
-static void mb_tr_disas_log(const DisasContextBase *dcb,
405
- CPUState *cs, FILE *logfile)
406
-{
407
- fprintf(logfile, "IN: %s\n", lookup_symbol(dcb->pc_first));
408
- target_disas(logfile, cs, dcb->pc_first, dcb->tb->size);
409
-}
410
-
411
static const TranslatorOps mb_tr_ops = {
412
.init_disas_context = mb_tr_init_disas_context,
413
.tb_start = mb_tr_tb_start,
414
.insn_start = mb_tr_insn_start,
415
.translate_insn = mb_tr_translate_insn,
416
.tb_stop = mb_tr_tb_stop,
672
.tb_stop = mb_tr_tb_stop,
417
- .disas_log = mb_tr_disas_log,
673
};
418
};
674
419
675
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
420
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
676
- vaddr pc, void *host_pc)
677
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
678
+ int *max_insns, vaddr pc, void *host_pc)
679
{
680
DisasContext dc;
681
translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
682
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
683
index XXXXXXX..XXXXXXX 100644
684
--- a/target/mips/cpu.c
685
+++ b/target/mips/cpu.c
686
@@ -XXX,XX +XXX,XX @@ static const Property mips_cpu_properties[] = {
687
#include "hw/core/tcg-cpu-ops.h"
688
static const TCGCPUOps mips_tcg_ops = {
689
.initialize = mips_tcg_init,
690
+ .translate_code = mips_translate_code,
691
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
692
.restore_state_to_opc = mips_restore_state_to_opc,
693
421
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
694
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
422
index XXXXXXX..XXXXXXX 100644
695
index XXXXXXX..XXXXXXX 100644
423
--- a/target/mips/tcg/translate.c
696
--- a/target/mips/tcg/translate.c
424
+++ b/target/mips/tcg/translate.c
697
+++ b/target/mips/tcg/translate.c
425
@@ -XXX,XX +XXX,XX @@
698
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
426
#include "exec/translation-block.h"
427
#include "semihosting/semihost.h"
428
#include "trace.h"
429
-#include "disas/disas.h"
430
#include "fpu_helper.h"
431
432
#define HELPER_H "helper.h"
433
@@ -XXX,XX +XXX,XX @@ static void mips_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
434
}
435
}
436
437
-static void mips_tr_disas_log(const DisasContextBase *dcbase,
438
- CPUState *cs, FILE *logfile)
439
-{
440
- fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
441
- target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
442
-}
443
-
444
static const TranslatorOps mips_tr_ops = {
445
.init_disas_context = mips_tr_init_disas_context,
446
.tb_start = mips_tr_tb_start,
447
.insn_start = mips_tr_insn_start,
448
.translate_insn = mips_tr_translate_insn,
449
.tb_stop = mips_tr_tb_stop,
699
.tb_stop = mips_tr_tb_stop,
450
- .disas_log = mips_tr_disas_log,
700
};
451
};
701
452
702
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
453
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
703
- vaddr pc, void *host_pc)
704
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
705
+ int *max_insns, vaddr pc, void *host_pc)
706
{
707
DisasContext ctx;
708
709
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
710
index XXXXXXX..XXXXXXX 100644
711
--- a/target/openrisc/cpu.c
712
+++ b/target/openrisc/cpu.c
713
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {
714
715
static const TCGCPUOps openrisc_tcg_ops = {
716
.initialize = openrisc_translate_init,
717
+ .translate_code = openrisc_translate_code,
718
.synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
719
.restore_state_to_opc = openrisc_restore_state_to_opc,
720
454
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
721
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
455
index XXXXXXX..XXXXXXX 100644
722
index XXXXXXX..XXXXXXX 100644
456
--- a/target/openrisc/translate.c
723
--- a/target/openrisc/translate.c
457
+++ b/target/openrisc/translate.c
724
+++ b/target/openrisc/translate.c
458
@@ -XXX,XX +XXX,XX @@
725
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
459
#include "qemu/osdep.h"
460
#include "cpu.h"
461
#include "exec/exec-all.h"
462
-#include "disas/disas.h"
463
#include "tcg/tcg-op.h"
464
#include "qemu/log.h"
465
#include "qemu/bitops.h"
466
@@ -XXX,XX +XXX,XX @@ static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
467
}
468
}
469
470
-static void openrisc_tr_disas_log(const DisasContextBase *dcbase,
471
- CPUState *cs, FILE *logfile)
472
-{
473
- DisasContext *s = container_of(dcbase, DisasContext, base);
474
-
475
- fprintf(logfile, "IN: %s\n", lookup_symbol(s->base.pc_first));
476
- target_disas(logfile, cs, s->base.pc_first, s->base.tb->size);
477
-}
478
-
479
static const TranslatorOps openrisc_tr_ops = {
480
.init_disas_context = openrisc_tr_init_disas_context,
481
.tb_start = openrisc_tr_tb_start,
482
.insn_start = openrisc_tr_insn_start,
483
.translate_insn = openrisc_tr_translate_insn,
484
.tb_stop = openrisc_tr_tb_stop,
726
.tb_stop = openrisc_tr_tb_stop,
485
- .disas_log = openrisc_tr_disas_log,
727
};
486
};
728
487
729
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
488
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
730
- vaddr pc, void *host_pc)
731
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
732
+ int *max_insns, vaddr pc, void *host_pc)
733
{
734
DisasContext ctx;
735
736
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
737
index XXXXXXX..XXXXXXX 100644
738
--- a/target/ppc/cpu_init.c
739
+++ b/target/ppc/cpu_init.c
740
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {
741
742
static const TCGCPUOps ppc_tcg_ops = {
743
.initialize = ppc_translate_init,
744
+ .translate_code = ppc_translate_code,
745
.restore_state_to_opc = ppc_restore_state_to_opc,
746
747
#ifdef CONFIG_USER_ONLY
489
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
748
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
490
index XXXXXXX..XXXXXXX 100644
749
index XXXXXXX..XXXXXXX 100644
491
--- a/target/ppc/translate.c
750
--- a/target/ppc/translate.c
492
+++ b/target/ppc/translate.c
751
+++ b/target/ppc/translate.c
493
@@ -XXX,XX +XXX,XX @@
752
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
494
#include "qemu/osdep.h"
495
#include "cpu.h"
496
#include "internal.h"
497
-#include "disas/disas.h"
498
#include "exec/exec-all.h"
499
#include "tcg/tcg-op.h"
500
#include "tcg/tcg-op-gvec.h"
501
@@ -XXX,XX +XXX,XX @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
502
}
503
}
504
505
-static void ppc_tr_disas_log(const DisasContextBase *dcbase,
506
- CPUState *cs, FILE *logfile)
507
-{
508
- fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
509
- target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
510
-}
511
-
512
static const TranslatorOps ppc_tr_ops = {
513
.init_disas_context = ppc_tr_init_disas_context,
514
.tb_start = ppc_tr_tb_start,
515
.insn_start = ppc_tr_insn_start,
516
.translate_insn = ppc_tr_translate_insn,
517
.tb_stop = ppc_tr_tb_stop,
753
.tb_stop = ppc_tr_tb_stop,
518
- .disas_log = ppc_tr_disas_log,
754
};
519
};
755
520
756
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
521
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
757
- vaddr pc, void *host_pc)
758
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
759
+ int *max_insns, vaddr pc, void *host_pc)
760
{
761
DisasContext ctx;
762
763
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
764
index XXXXXXX..XXXXXXX 100644
765
--- a/target/riscv/tcg/tcg-cpu.c
766
+++ b/target/riscv/tcg/tcg-cpu.c
767
@@ -XXX,XX +XXX,XX @@ static void riscv_restore_state_to_opc(CPUState *cs,
768
769
static const TCGCPUOps riscv_tcg_ops = {
770
.initialize = riscv_translate_init,
771
+ .translate_code = riscv_translate_code,
772
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
773
.restore_state_to_opc = riscv_restore_state_to_opc,
774
522
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
775
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
523
index XXXXXXX..XXXXXXX 100644
776
index XXXXXXX..XXXXXXX 100644
524
--- a/target/riscv/translate.c
777
--- a/target/riscv/translate.c
525
+++ b/target/riscv/translate.c
778
+++ b/target/riscv/translate.c
526
@@ -XXX,XX +XXX,XX @@
779
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
527
#include "qemu/log.h"
528
#include "cpu.h"
529
#include "tcg/tcg-op.h"
530
-#include "disas/disas.h"
531
#include "exec/cpu_ldst.h"
532
#include "exec/exec-all.h"
533
#include "exec/helper-proto.h"
534
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
535
}
536
}
537
538
-static void riscv_tr_disas_log(const DisasContextBase *dcbase,
539
- CPUState *cpu, FILE *logfile)
540
-{
541
-#ifndef CONFIG_USER_ONLY
542
- RISCVCPU *rvcpu = RISCV_CPU(cpu);
543
- CPURISCVState *env = &rvcpu->env;
544
-#endif
545
-
546
- fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
547
-#ifndef CONFIG_USER_ONLY
548
- fprintf(logfile, "Priv: "TARGET_FMT_ld"; Virt: %d\n",
549
- env->priv, env->virt_enabled);
550
-#endif
551
- target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
552
-}
553
-
554
static const TranslatorOps riscv_tr_ops = {
555
.init_disas_context = riscv_tr_init_disas_context,
556
.tb_start = riscv_tr_tb_start,
557
.insn_start = riscv_tr_insn_start,
558
.translate_insn = riscv_tr_translate_insn,
559
.tb_stop = riscv_tr_tb_stop,
780
.tb_stop = riscv_tr_tb_stop,
560
- .disas_log = riscv_tr_disas_log,
781
};
561
};
782
562
783
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
563
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
784
- vaddr pc, void *host_pc)
785
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
786
+ int *max_insns, vaddr pc, void *host_pc)
787
{
788
DisasContext ctx;
789
790
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
791
index XXXXXXX..XXXXXXX 100644
792
--- a/target/rx/cpu.c
793
+++ b/target/rx/cpu.c
794
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {
795
796
static const TCGCPUOps rx_tcg_ops = {
797
.initialize = rx_translate_init,
798
+ .translate_code = rx_translate_code,
799
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
800
.restore_state_to_opc = rx_restore_state_to_opc,
801
.tlb_fill = rx_cpu_tlb_fill,
564
diff --git a/target/rx/translate.c b/target/rx/translate.c
802
diff --git a/target/rx/translate.c b/target/rx/translate.c
565
index XXXXXXX..XXXXXXX 100644
803
index XXXXXXX..XXXXXXX 100644
566
--- a/target/rx/translate.c
804
--- a/target/rx/translate.c
567
+++ b/target/rx/translate.c
805
+++ b/target/rx/translate.c
568
@@ -XXX,XX +XXX,XX @@ static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
806
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
569
}
570
}
571
572
-static void rx_tr_disas_log(const DisasContextBase *dcbase,
573
- CPUState *cs, FILE *logfile)
574
-{
575
- fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
576
- target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
577
-}
578
-
579
static const TranslatorOps rx_tr_ops = {
580
.init_disas_context = rx_tr_init_disas_context,
581
.tb_start = rx_tr_tb_start,
582
.insn_start = rx_tr_insn_start,
583
.translate_insn = rx_tr_translate_insn,
584
.tb_stop = rx_tr_tb_stop,
807
.tb_stop = rx_tr_tb_stop,
585
- .disas_log = rx_tr_disas_log,
808
};
586
};
809
587
810
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
588
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
811
- vaddr pc, void *host_pc)
812
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
813
+ int *max_insns, vaddr pc, void *host_pc)
814
{
815
DisasContext dc;
816
817
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
818
index XXXXXXX..XXXXXXX 100644
819
--- a/target/s390x/cpu.c
820
+++ b/target/s390x/cpu.c
821
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
822
823
static const TCGCPUOps s390_tcg_ops = {
824
.initialize = s390x_translate_init,
825
+ .translate_code = s390x_translate_code,
826
.restore_state_to_opc = s390x_restore_state_to_opc,
827
828
#ifdef CONFIG_USER_ONLY
829
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
830
index XXXXXXX..XXXXXXX 100644
831
--- a/target/s390x/tcg/translate.c
832
+++ b/target/s390x/tcg/translate.c
833
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
834
.disas_log = s390x_tr_disas_log,
835
};
836
837
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
838
- vaddr pc, void *host_pc)
839
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
840
+ int *max_insns, vaddr pc, void *host_pc)
841
{
842
DisasContext dc;
843
844
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
845
index XXXXXXX..XXXXXXX 100644
846
--- a/target/sh4/cpu.c
847
+++ b/target/sh4/cpu.c
848
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
849
850
static const TCGCPUOps superh_tcg_ops = {
851
.initialize = sh4_translate_init,
852
+ .translate_code = sh4_translate_code,
853
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
854
.restore_state_to_opc = superh_restore_state_to_opc,
855
589
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
856
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
590
index XXXXXXX..XXXXXXX 100644
857
index XXXXXXX..XXXXXXX 100644
591
--- a/target/sh4/translate.c
858
--- a/target/sh4/translate.c
592
+++ b/target/sh4/translate.c
859
+++ b/target/sh4/translate.c
593
@@ -XXX,XX +XXX,XX @@
860
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
594
595
#include "qemu/osdep.h"
596
#include "cpu.h"
597
-#include "disas/disas.h"
598
#include "exec/exec-all.h"
599
#include "tcg/tcg-op.h"
600
#include "exec/helper-proto.h"
601
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
602
}
603
}
604
605
-static void sh4_tr_disas_log(const DisasContextBase *dcbase,
606
- CPUState *cs, FILE *logfile)
607
-{
608
- fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
609
- target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
610
-}
611
-
612
static const TranslatorOps sh4_tr_ops = {
613
.init_disas_context = sh4_tr_init_disas_context,
614
.tb_start = sh4_tr_tb_start,
615
.insn_start = sh4_tr_insn_start,
616
.translate_insn = sh4_tr_translate_insn,
617
.tb_stop = sh4_tr_tb_stop,
861
.tb_stop = sh4_tr_tb_stop,
618
- .disas_log = sh4_tr_disas_log,
862
};
619
};
863
620
864
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
621
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
865
- vaddr pc, void *host_pc)
866
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
867
+ int *max_insns, vaddr pc, void *host_pc)
868
{
869
DisasContext ctx;
870
871
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
872
index XXXXXXX..XXXXXXX 100644
873
--- a/target/sparc/cpu.c
874
+++ b/target/sparc/cpu.c
875
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
876
877
static const TCGCPUOps sparc_tcg_ops = {
878
.initialize = sparc_tcg_init,
879
+ .translate_code = sparc_translate_code,
880
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
881
.restore_state_to_opc = sparc_restore_state_to_opc,
882
622
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
883
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
623
index XXXXXXX..XXXXXXX 100644
884
index XXXXXXX..XXXXXXX 100644
624
--- a/target/sparc/translate.c
885
--- a/target/sparc/translate.c
625
+++ b/target/sparc/translate.c
886
+++ b/target/sparc/translate.c
626
@@ -XXX,XX +XXX,XX @@
887
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
627
#include "qemu/osdep.h"
628
629
#include "cpu.h"
630
-#include "disas/disas.h"
631
#include "exec/helper-proto.h"
632
#include "exec/exec-all.h"
633
#include "tcg/tcg-op.h"
634
@@ -XXX,XX +XXX,XX @@ static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
635
}
636
}
637
638
-static void sparc_tr_disas_log(const DisasContextBase *dcbase,
639
- CPUState *cpu, FILE *logfile)
640
-{
641
- fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
642
- target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
643
-}
644
-
645
static const TranslatorOps sparc_tr_ops = {
646
.init_disas_context = sparc_tr_init_disas_context,
647
.tb_start = sparc_tr_tb_start,
648
.insn_start = sparc_tr_insn_start,
649
.translate_insn = sparc_tr_translate_insn,
650
.tb_stop = sparc_tr_tb_stop,
888
.tb_stop = sparc_tr_tb_stop,
651
- .disas_log = sparc_tr_disas_log,
889
};
652
};
890
653
891
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
654
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
892
- vaddr pc, void *host_pc)
893
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
894
+ int *max_insns, vaddr pc, void *host_pc)
895
{
896
DisasContext dc = {};
897
898
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
899
index XXXXXXX..XXXXXXX 100644
900
--- a/target/tricore/cpu.c
901
+++ b/target/tricore/cpu.c
902
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {
903
904
static const TCGCPUOps tricore_tcg_ops = {
905
.initialize = tricore_tcg_init,
906
+ .translate_code = tricore_translate_code,
907
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
908
.restore_state_to_opc = tricore_restore_state_to_opc,
909
.tlb_fill = tricore_cpu_tlb_fill,
655
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
910
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
656
index XXXXXXX..XXXXXXX 100644
911
index XXXXXXX..XXXXXXX 100644
657
--- a/target/tricore/translate.c
912
--- a/target/tricore/translate.c
658
+++ b/target/tricore/translate.c
913
+++ b/target/tricore/translate.c
659
@@ -XXX,XX +XXX,XX @@
914
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
660
915
.tb_stop = tricore_tr_tb_stop,
661
#include "qemu/osdep.h"
916
};
662
#include "cpu.h"
917
663
-#include "disas/disas.h"
664
#include "exec/exec-all.h"
665
#include "tcg/tcg-op.h"
666
#include "exec/cpu_ldst.h"
667
@@ -XXX,XX +XXX,XX @@ static void tricore_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
668
}
669
}
670
671
-static void tricore_tr_disas_log(const DisasContextBase *dcbase,
672
- CPUState *cpu, FILE *logfile)
673
-{
674
- fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
675
- target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
676
-}
677
-
918
-
678
static const TranslatorOps tricore_tr_ops = {
919
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
679
.init_disas_context = tricore_tr_init_disas_context,
920
- vaddr pc, void *host_pc)
680
.tb_start = tricore_tr_tb_start,
921
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
681
.insn_start = tricore_tr_insn_start,
922
+ int *max_insns, vaddr pc, void *host_pc)
682
.translate_insn = tricore_tr_translate_insn,
923
{
683
.tb_stop = tricore_tr_tb_stop,
924
DisasContext ctx;
684
- .disas_log = tricore_tr_disas_log,
925
translator_loop(cs, tb, max_insns, pc, host_pc,
685
};
926
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
686
927
index XXXXXXX..XXXXXXX 100644
928
--- a/target/xtensa/cpu.c
929
+++ b/target/xtensa/cpu.c
930
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
931
932
static const TCGCPUOps xtensa_tcg_ops = {
933
.initialize = xtensa_translate_init,
934
+ .translate_code = xtensa_translate_code,
935
.debug_excp_handler = xtensa_breakpoint_handler,
936
.restore_state_to_opc = xtensa_restore_state_to_opc,
687
937
688
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
938
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
689
index XXXXXXX..XXXXXXX 100644
939
index XXXXXXX..XXXXXXX 100644
690
--- a/target/xtensa/translate.c
940
--- a/target/xtensa/translate.c
691
+++ b/target/xtensa/translate.c
941
+++ b/target/xtensa/translate.c
692
@@ -XXX,XX +XXX,XX @@
942
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
693
694
#include "cpu.h"
695
#include "exec/exec-all.h"
696
-#include "disas/disas.h"
697
#include "tcg/tcg-op.h"
698
#include "qemu/log.h"
699
#include "qemu/qemu-print.h"
700
@@ -XXX,XX +XXX,XX @@ static void xtensa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
701
}
702
}
703
704
-static void xtensa_tr_disas_log(const DisasContextBase *dcbase,
705
- CPUState *cpu, FILE *logfile)
706
-{
707
- fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
708
- target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
709
-}
710
-
711
static const TranslatorOps xtensa_translator_ops = {
712
.init_disas_context = xtensa_tr_init_disas_context,
713
.tb_start = xtensa_tr_tb_start,
714
.insn_start = xtensa_tr_insn_start,
715
.translate_insn = xtensa_tr_translate_insn,
716
.tb_stop = xtensa_tr_tb_stop,
943
.tb_stop = xtensa_tr_tb_stop,
717
- .disas_log = xtensa_tr_disas_log,
944
};
718
};
945
719
946
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
720
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
947
- vaddr pc, void *host_pc)
948
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
949
+ int *max_insns, vaddr pc, void *host_pc)
950
{
951
DisasContext dc = {};
952
translator_loop(cpu, tb, max_insns, pc, host_pc,
721
--
2.34.1

--
2.43.0