The following changes since commit aa3a285b5bc56a4208b3b57d4a55291e9c260107:

  Merge tag 'mem-2024-12-21' of https://github.com/davidhildenbrand/qemu into staging (2024-12-22 14:33:27 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241224

for you to fetch changes up to e4a8e093dc74be049f4829831dce76e5edab0003:

  accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core (2024-12-24 08:32:15 -0800)

----------------------------------------------------------------
tcg/optimize: Remove in-flight mask data from OptContext
fpu: Add float*_muladd_scalbn
fpu: Remove float_muladd_halve_result
fpu: Add float_round_nearest_even_max
fpu: Add float_muladd_suppress_add_product_zero
target/hexagon: Use float32_muladd
accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

----------------------------------------------------------------
Ilya Leoshkevich (1):
      tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

Pierrick Bouvier (1):
      plugins: optimize cpu_index code generation

Richard Henderson (70):
      tcg/optimize: Split out finish_bb, finish_ebb
      tcg/optimize: Split out fold_affected_mask
      tcg/optimize: Copy mask writeback to fold_masks
      tcg/optimize: Split out fold_masks_zs
      tcg/optimize: Augment s_mask from z_mask in fold_masks_zs
      tcg/optimize: Change representation of s_mask
      tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2
      tcg/optimize: Introduce const value accessors for TempOptInfo
      tcg/optimize: Use fold_masks_zs in fold_and
      tcg/optimize: Use fold_masks_zs in fold_andc
      tcg/optimize: Use fold_masks_zs in fold_bswap
      tcg/optimize: Use fold_masks_zs in fold_count_zeros
      tcg/optimize: Use fold_masks_z in fold_ctpop
      tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
      tcg/optimize: Compute sign mask in fold_deposit
      tcg/optimize: Use finish_folding in fold_divide
      tcg/optimize: Use finish_folding in fold_dup, fold_dup2
      tcg/optimize: Use fold_masks_s in fold_eqv
      tcg/optimize: Use fold_masks_z in fold_extract
      tcg/optimize: Use finish_folding in fold_extract2
      tcg/optimize: Use fold_masks_zs in fold_exts
      tcg/optimize: Use fold_masks_z in fold_extu
      tcg/optimize: Use fold_masks_zs in fold_movcond
      tcg/optimize: Use finish_folding in fold_mul*
      tcg/optimize: Use fold_masks_s in fold_nand
      tcg/optimize: Use fold_masks_z in fold_neg_no_const
      tcg/optimize: Use fold_masks_s in fold_nor
      tcg/optimize: Use fold_masks_s in fold_not
      tcg/optimize: Use fold_masks_zs in fold_or
      tcg/optimize: Use fold_masks_zs in fold_orc
      tcg/optimize: Use fold_masks_zs in fold_qemu_ld
      tcg/optimize: Return true from fold_qemu_st, fold_tcg_st
      tcg/optimize: Use finish_folding in fold_remainder
      tcg/optimize: Distinguish simplification in fold_setcond_zmask
      tcg/optimize: Use fold_masks_z in fold_setcond
      tcg/optimize: Use fold_masks_s in fold_negsetcond
      tcg/optimize: Use fold_masks_z in fold_setcond2
      tcg/optimize: Use finish_folding in fold_cmp_vec
      tcg/optimize: Use finish_folding in fold_cmpsel_vec
      tcg/optimize: Use fold_masks_zs in fold_sextract
      tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
      tcg/optimize: Simplify sign bit test in fold_shift
      tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec
      tcg/optimize: Use fold_masks_zs in fold_tcg_ld
      tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy
      tcg/optimize: Use fold_masks_zs in fold_xor
      tcg/optimize: Use finish_folding in fold_bitsel_vec
      tcg/optimize: Use finish_folding as default in tcg_optimize
      tcg/optimize: Remove z_mask, s_mask from OptContext
      tcg/optimize: Re-enable sign-mask optimizations
      tcg/optimize: Move fold_bitsel_vec into alphabetic sort
      tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
      softfloat: Add float{16,32,64}_muladd_scalbn
      target/arm: Use float*_muladd_scalbn
      target/sparc: Use float*_muladd_scalbn
      softfloat: Remove float_muladd_halve_result
      softfloat: Add float_round_nearest_even_max
      softfloat: Add float_muladd_suppress_add_product_zero
      target/hexagon: Use float32_mul in helper_sfmpy
      target/hexagon: Use float32_muladd for helper_sffma
      target/hexagon: Use float32_muladd for helper_sffms
      target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc
      target/hexagon: Use float32_muladd for helper_sffm[as]_lib
      target/hexagon: Remove internal_fmafx
      target/hexagon: Expand GEN_XF_ROUND
      target/hexagon: Remove Float
      target/hexagon: Remove Double
      target/hexagon: Use mulu64 for int128_mul_6464
      target/hexagon: Simplify internal_mpyhh setup
      accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

 include/exec/translator.h | 14 -
 include/fpu/softfloat-types.h | 2 +
 include/fpu/softfloat.h | 14 +-
 include/hw/core/tcg-cpu-ops.h | 13 +
 target/alpha/cpu.h | 2 +
 target/arm/internals.h | 2 +
 target/avr/cpu.h | 2 +
 target/hexagon/cpu.h | 2 +
 target/hexagon/fma_emu.h | 3 -
 target/hppa/cpu.h | 2 +
 target/i386/tcg/helper-tcg.h | 2 +
 target/loongarch/internals.h | 2 +
 target/m68k/cpu.h | 2 +
 target/microblaze/cpu.h | 2 +
 target/mips/tcg/tcg-internal.h | 2 +
 target/openrisc/cpu.h | 2 +
 target/ppc/cpu.h | 2 +
 target/riscv/cpu.h | 3 +
 target/rx/cpu.h | 2 +
 target/s390x/s390x-internal.h | 2 +
 target/sh4/cpu.h | 2 +
 target/sparc/cpu.h | 2 +
 target/sparc/helper.h | 4 +-
 target/tricore/cpu.h | 2 +
 target/xtensa/cpu.h | 2 +
 accel/tcg/cpu-exec.c | 8 +-
 accel/tcg/plugin-gen.c | 9 +
 accel/tcg/translate-all.c | 8 +-
 fpu/softfloat.c | 63 +--
 target/alpha/cpu.c | 1 +
 target/alpha/translate.c | 4 +-
 target/arm/cpu.c | 1 +
 target/arm/tcg/cpu-v7m.c | 1 +
 target/arm/tcg/helper-a64.c | 6 +-
 target/arm/tcg/translate.c | 5 +-
 target/avr/cpu.c | 1 +
 target/avr/translate.c | 6 +-
 target/hexagon/cpu.c | 1 +
 target/hexagon/fma_emu.c | 496 ++++++---------------
 target/hexagon/op_helper.c | 125 ++----
 target/hexagon/translate.c | 4 +-
 target/hppa/cpu.c | 1 +
 target/hppa/translate.c | 4 +-
 target/i386/tcg/tcg-cpu.c | 1 +
 target/i386/tcg/translate.c | 5 +-
 target/loongarch/cpu.c | 1 +
 target/loongarch/tcg/translate.c | 4 +-
 target/m68k/cpu.c | 1 +
 target/m68k/translate.c | 4 +-
 target/microblaze/cpu.c | 1 +
 target/microblaze/translate.c | 4 +-
 target/mips/cpu.c | 1 +
 target/mips/tcg/translate.c | 4 +-
 target/openrisc/cpu.c | 1 +
 target/openrisc/translate.c | 4 +-
 target/ppc/cpu_init.c | 1 +
 target/ppc/translate.c | 4 +-
 target/riscv/tcg/tcg-cpu.c | 1 +
 target/riscv/translate.c | 4 +-
 target/rx/cpu.c | 1 +
 target/rx/translate.c | 4 +-
 target/s390x/cpu.c | 1 +
 target/s390x/tcg/translate.c | 4 +-
 target/sh4/cpu.c | 1 +
 target/sh4/translate.c | 4 +-
 target/sparc/cpu.c | 1 +
 target/sparc/fop_helper.c | 8 +-
 target/sparc/translate.c | 84 ++--
 target/tricore/cpu.c | 1 +
 target/tricore/translate.c | 5 +-
 target/xtensa/cpu.c | 1 +
 target/xtensa/translate.c | 4 +-
 tcg/optimize.c | 857 +++++++++++++++++++-----------------
 tests/tcg/multiarch/system/memory.c | 9 +-
 fpu/softfloat-parts.c.inc | 16 +-
 75 files changed, 866 insertions(+), 1009 deletions(-)
tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

From: Ilya Leoshkevich <iii@linux.ibm.com>

make check-tcg fails on Fedora with the following error message:

  alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
  qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
     17 | #include <inttypes.h>
        |          ^~~~~~~~~~~~
  compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tests/tcg/multiarch/system/memory.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -XXX,XX +XXX,XX @@
 
 #include <stdint.h>
 #include <stdbool.h>
-#include <inttypes.h>
 #include <minilib.h>
 
 #ifndef CHECK_UNALIGNED
@@ -XXX,XX +XXX,XX @@ int main(void)
     int i;
     bool ok = true;
 
-    ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
-    ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+    ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+    ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);
 
     /* Run through the unsigned tests first */
     for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -XXX,XX +XXX,XX @@ int main(void)
         ok = do_signed_reads(true);
     }
 
-    ml_printf("Test data read: %"PRId32"\n", test_read_count);
-    ml_printf("Test data write: %"PRId32"\n", test_write_count);
+    ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+    ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
     ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
     return ok ? 0 : -1;
 }
--
2.43.0
plugins: optimize cpu_index code generation

From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
 accel/tcg/plugin-gen.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void gen_disable_mem_helper(void)
 
 static TCGv_i32 gen_cpu_index(void)
 {
+    /*
+     * Optimize when we run with a single vcpu. All values using cpu_index,
+     * including scoreboard index, will be optimized out.
+     * User-mode calls tb_flush when setting this flag. In system-mode, all
+     * vcpus are created before generating code.
+     */
+    if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+        return tcg_constant_i32(current_cpu->cpu_index);
+    }
     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
     tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
--
2.43.0
tcg/optimize: Split out finish_bb, finish_ebb

Call them directly from the opcode switch statement in tcg_optimize,
rather than in finish_folding based on opcode flags. Adjust folding
of conditional branches to match.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 47 +++++++++++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
     }
 }
 
+static void finish_bb(OptContext *ctx)
+{
+    /* We only optimize memory barriers across basic blocks. */
+    ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+    finish_bb(ctx);
+    /* We only optimize across extended basic blocks. */
+    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+    remove_mem_copy_all(ctx);
+}
+
 static void finish_folding(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     int i, nb_oargs;
 
-    /*
-     * We only optimize extended basic blocks. If the opcode ends a BB
-     * and is not a conditional branch, reset all temp data.
-     */
-    if (def->flags & TCG_OPF_BB_END) {
-        ctx->prev_mb = NULL;
-        if (!(def->flags & TCG_OPF_COND_BRANCH)) {
-            memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
-            remove_mem_copy_all(ctx);
-        }
-        return;
-    }
-
     nb_oargs = def->nb_oargs;
     for (i = 0; i < nb_oargs; i++) {
         TCGTemp *ts = arg_temp(op->args[i]);
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
     if (i > 0) {
         op->opc = INDEX_op_br;
         op->args[0] = op->args[3];
+        finish_ebb(ctx);
+    } else {
+        finish_bb(ctx);
     }
-    return false;
+    return true;
 }
 
 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
         }
         op->opc = INDEX_op_br;
         op->args[0] = label;
-        break;
+        finish_ebb(ctx);
+        return true;
     }
-    return false;
+
+    finish_bb(ctx);
+    return true;
 }
 
 static bool fold_bswap(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(xor):
             done = fold_xor(&ctx, op);
             break;
+        case INDEX_op_set_label:
+        case INDEX_op_br:
+        case INDEX_op_exit_tb:
+        case INDEX_op_goto_tb:
+        case INDEX_op_goto_ptr:
+            finish_ebb(&ctx);
+            done = true;
+            break;
         default:
             break;
         }
--
2.43.0
tcg/optimize: Split out fold_affected_mask

There are only a few logical operations which can compute
an "affected" mask. Split out handling of this optimization
to a separate function, only to be called when applicable.

Remove the a_mask field from OptContext, as the mask is
no longer stored anywhere.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
 
     /* In flight values from optimization. */
-    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
     uint64_t s_mask;  /* mask of clrsb(value) bits */
     TCGType type;
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
 
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
-    uint64_t a_mask = ctx->a_mask;
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
 
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
      * type changing opcodes.
      */
     if (ctx->type == TCG_TYPE_I32) {
-        a_mask = (int32_t)a_mask;
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
         ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
+    return false;
+}
+
+/*
+ * An "affected" mask bit is 0 if and only if the result is identical
+ * to the first input. Thus if the entire mask is 0, the operation
+ * is equivalent to a copy.
+ */
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
+{
+    if (ctx->type == TCG_TYPE_I32) {
+        a_mask = (uint32_t)a_mask;
+    }
     if (a_mask == 0) {
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
      * Known-zeros does not imply known-ones. Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
      */
-    if (arg_is_const(op->args[2])) {
-        ctx->a_mask = z1 & ~z2;
+    if (arg_is_const(op->args[2]) &&
+        fold_affected_mask(ctx, op, z1 & ~z2)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
      */
     if (arg_is_const(op->args[2])) {
         uint64_t z2 = ~arg_info(op->args[2])->z_mask;
-        ctx->a_mask = z1 & ~z2;
+        if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+            return true;
+        }
         z1 &= z2;
     }
     ctx->z_mask = z1;
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
 
     z_mask_old = arg_info(op->args[1])->z_mask;
     z_mask = extract64(z_mask_old, pos, len);
-    if (pos == 0) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
-    if (!type_change) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;
 
-    if (pos == 0) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
     }
 
     /* Assume all bits affected, no bits known zero, no sign reps. */
-    ctx.a_mask = -1;
    ctx.z_mask = -1;
    ctx.s_mask = 0;
 
--
2.43.0
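Aside (not part of the series): the "affected" mask above can be tried in a tiny standalone C program. The mask values below are invented for the example; the computation mirrors the z1 & ~z2 test that fold_and feeds to fold_affected_mask, where a zero result means the constant cannot clear any bit that could be set, so the AND is just a copy.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* z1: known-zero mask of the variable input (only low 16 bits may be set). */
        uint64_t z1 = 0x000000000000ffffull;
        /* z2: the constant operand of the AND, which is its own z_mask. */
        uint64_t z2 = 0x0000000000ffffffull;

        /* Bits where the result could differ from the first input. */
        uint64_t a_mask = z1 & ~z2;

        printf("a_mask = 0x%016llx -> %s\n", (unsigned long long)a_mask,
               a_mask == 0 ? "AND folds to a mov" : "AND must be kept");
        return 0;
    }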
tcg/optimize: Copy mask writeback to fold_masks

Use of fold_masks should be restricted to those opcodes that
can reliably make use of it -- those with a single output,
and from higher-level folders that set up the masks.
Prepare for conversion of each folder in turn.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
+    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    TCGTemp *ts;
+    TempOptInfo *ti;
+
+    /* Only single-output opcodes are supported here. */
+    tcg_debug_assert(def->nb_oargs == 1);
 
     /*
      * 32-bit ops generate 32-bit results, which for the purpose of
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
-        ctx->z_mask = z_mask;
-        ctx->s_mask = s_mask;
     }
 
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
-    return false;
+
+    ts = arg_temp(op->args[0]);
+    reset_ts(ctx, ts);
+
+    ti = ts_info(ts);
+    ti->z_mask = z_mask;
+    ti->s_mask = s_mask;
+    return true;
 }
 
 /*
--
2.43.0
tcg/optimize: Split out fold_masks_zs

Add a routine to which masks can be passed directly, rather than
storing them into OptContext. To be used in upcoming patches.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
-static bool fold_masks(OptContext *ctx, TCGOp *op)
+/*
+ * Record "zero" and "sign" masks for the single output of @op.
+ * See TempOptInfo definition of z_mask and s_mask.
+ * If z_mask allows, fold the output to constant zero.
+ */
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
+                          uint64_t z_mask, uint64_t s_mask)
 {
-    uint64_t z_mask = ctx->z_mask;
-    uint64_t s_mask = ctx->s_mask;
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+    return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
+}
+
 /*
  * An "affected" mask bit is 0 if and only if the result is identical
  * to the first input. Thus if the entire mask is 0, the operation
--
2.43.0
tcg/optimize: Augment s_mask from z_mask in fold_masks_zs

Consider the passed s_mask to be a minimum deduced from
either existing s_mask or from a sign-extension operation.
We may be able to deduce more from the set of known zeros.
Remove identical logic from several opcode folders.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * Record "zero" and "sign" masks for the single output of @op.
  * See TempOptInfo definition of z_mask and s_mask.
  * If z_mask allows, fold the output to constant zero.
+ * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                           uint64_t z_mask, uint64_t s_mask)
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 
     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask;
+    ti->s_mask = s_mask | smask_from_zmask(z_mask);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    s_mask = smask_from_zmask(z_mask);
 
+    s_mask = 0;
     switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
     case TCG_BSWAP_OZ:
         break;
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         /* The high bits are undefined: force all bits above the sign to 1. */
         z_mask |= sign << 1;
-        s_mask = 0;
         break;
     }
     ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
     ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
         return true;
     }
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
 
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
     if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
     int width = 8 * memop_size(mop);
 
     if (width < 64) {
-        ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
-        if (!(mop & MO_SIGN)) {
+        if (mop & MO_SIGN) {
+            ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+        } else {
             ctx->z_mask = MAKE_64BIT_MASK(0, width);
-            ctx->s_mask <<= 1;
         }
     }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
     fold_setcond_tst_pow2(ctx, op, false);
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 
 do_setcond_const:
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
         break;
     CASE_OP_32_64(ld8u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 8);
-        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
         break;
     CASE_OP_32_64(ld16s):
         ctx->s_mask = MAKE_64BIT_MASK(16, 48);
         break;
     CASE_OP_32_64(ld16u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 16);
-        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
         break;
     case INDEX_op_ld32s_i64:
         ctx->s_mask = MAKE_64BIT_MASK(32, 32);
         break;
     case INDEX_op_ld32u_i64:
         ctx->z_mask = MAKE_64BIT_MASK(0, 32);
-        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
         break;
     default:
         g_assert_not_reached();
--
2.43.0
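Aside (not part of the series): the augmentation above can be tried in isolation. smask_from_zmask() below is copied from the helper quoted (and removed) in the next patch; the mask values are invented for the example.

    #include <stdio.h>
    #include <stdint.h>

    /* Same computation as QEMU's smask_from_zmask(). */
    static uint64_t smask_from_zmask(uint64_t zmask)
    {
        int rep = __builtin_clzll(zmask);   /* clz64() in QEMU; zmask must be nonzero */
        if (rep == 0) {
            return 0;
        }
        rep -= 1;
        return ~(~0ull >> rep);
    }

    int main(void)
    {
        uint64_t z_mask = 0x000000000000ffffull;  /* top 48 bits known zero */
        uint64_t s_mask = 0;                      /* folder provided no sign info */

        /* fold_masks_zs() now ORs in what the known zeros already imply. */
        s_mask |= smask_from_zmask(z_mask);

        /* 48 leading zeros imply at least 47 redundant sign bits: 0xfffffffffffe0000. */
        printf("s_mask = 0x%016llx\n", (unsigned long long)s_mask);
        return 0;
    }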
tcg/optimize: Change representation of s_mask

Change the representation from sign bit repetitions to all bits equal
to the sign bit, including the sign bit itself.

The previous format has a problem in that it is difficult to recreate
a valid sign mask after a shift operation: the "repetitions" part of
the previous format meant that applying the same shift as for the value
lead to an off-by-one value.

The new format, including the sign bit itself, means that the sign mask
can be manipulated in exactly the same way as the value, canonicalization
is easier.

Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
to do so. Treat 0 as a non-canonical but typeless input for no sign
information, which will be reset as appropriate for the data type.
We can easily fold in the data from z_mask while canonicalizing.

Temporarily disable optimizations using s_mask while each operation is
converted to use fold_masks_zs and to the new form.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 64 ++++++++++++--------------------------------------
 1 file changed, 15 insertions(+), 49 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
     uint64_t val;
     uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
-    uint64_t s_mask;  /* a left-aligned mask of clrsb(value) bits. */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
 } TempOptInfo;
 
 typedef struct OptContext {
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
 
     /* In flight values from optimization. */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
-    uint64_t s_mask;  /* mask of clrsb(value) bits */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
     TCGType type;
 } OptContext;
 
-/* Calculate the smask for a specific value. */
-static uint64_t smask_from_value(uint64_t value)
-{
-    int rep = clrsb64(value);
-    return ~(~0ull >> rep);
-}
-
-/*
- * Calculate the smask for a given set of known-zeros.
- * If there are lots of zeros on the left, we can consider the remainder
- * an unsigned field, and thus the corresponding signed field is one bit
- * larger.
- */
-static uint64_t smask_from_zmask(uint64_t zmask)
-{
-    /*
-     * Only the 0 bits are significant for zmask, thus the msb itself
-     * must be zero, else we have no sign information.
-     */
-    int rep = clz64(zmask);
-    if (rep == 0) {
-        return 0;
-    }
-    rep -= 1;
-    return ~(~0ull >> rep);
-}
-
-/*
- * Recreate a properly left-aligned smask after manipulation.
- * Some bit-shuffling, particularly shifts and rotates, may
- * retain sign bits on the left, but may scatter disconnected
- * sign bits on the right. Retain only what remains to the left.
- */
-static uint64_t smask_from_smask(int64_t smask)
-{
-    /* Only the 1 bits are significant for smask */
-    return smask_from_zmask(~smask);
-}
-
 static inline TempOptInfo *ts_info(TCGTemp *ts)
 {
     return ts->state_ptr;
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
         ti->is_const = true;
         ti->val = ts->val;
         ti->z_mask = ts->val;
-        ti->s_mask = smask_from_value(ts->val);
+        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
     } else {
         ti->is_const = false;
         ti->z_mask = -1;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
          */
         if (i == 0) {
             ts_info(ts)->z_mask = ctx->z_mask;
-            ts_info(ts)->s_mask = ctx->s_mask;
         }
     }
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
-                          uint64_t z_mask, uint64_t s_mask)
+                          uint64_t z_mask, int64_t s_mask)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
+    int rep;
 
     /* Only single-output opcodes are supported here. */
     tcg_debug_assert(def->nb_oargs == 1);
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
      */
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
-        s_mask |= MAKE_64BIT_MASK(32, 32);
+        s_mask |= INT32_MIN;
     }
 
     if (z_mask == 0) {
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 
     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask | smask_from_zmask(z_mask);
+
+    /* Canonicalize s_mask and incorporate data from z_mask. */
+    rep = clz64(~s_mask);
+    rep = MAX(rep, clz64(z_mask));
+    rep = MAX(rep - 1, 0);
+    ti->s_mask = INT64_MIN >> rep;
+
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
78
+ TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
147
79
+ TCGv_i64 val = tcg_temp_ebb_new_i64();
148
ctx->z_mask = z_mask;
80
+ TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
149
ctx->s_mask = s_mask;
81
+
150
- if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
82
+ tcg_gen_ld_i32(cpu_index, tcg_env,
151
+ if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
83
+ -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
152
return true;
84
+ tcg_gen_muli_i32(cpu_index, cpu_index, g_array_get_element_size(arr));
85
+ tcg_gen_ext_i32_ptr(ptr, cpu_index);
86
+ tcg_temp_free_i32(cpu_index);
87
+
88
+ tcg_gen_addi_ptr(ptr, ptr, (intptr_t)arr->data);
89
+ tcg_gen_ld_i64(val, ptr, offset);
90
+ tcg_gen_addi_i64(val, val, cb->inline_insn.imm);
91
+ tcg_gen_st_i64(val, ptr, offset);
92
+
93
+ tcg_temp_free_i64(val);
94
+ tcg_temp_free_ptr(ptr);
95
+}
96
+
97
/* #define DEBUG_PLUGIN_GEN_OPS */
98
static void pr_ops(void)
99
{
100
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
101
{
102
enum plugin_gen_from from = op->args[0];
103
struct qemu_plugin_insn *insn = NULL;
104
+ const GArray *cbs;
105
+ int i, n;
106
107
if (insn_idx >= 0) {
108
insn = g_ptr_array_index(plugin_tb->insns, insn_idx);
109
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
110
assert(insn != NULL);
111
gen_disable_mem_helper(plugin_tb, insn);
112
break;
113
+
114
+ case PLUGIN_GEN_FROM_TB:
115
+ assert(insn == NULL);
116
+
117
+ cbs = plugin_tb->cbs[PLUGIN_CB_REGULAR];
118
+ for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
119
+ struct qemu_plugin_dyn_cb *cb =
120
+ &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
121
+ gen_udata_cb(cb);
122
+ }
123
+
124
+ cbs = plugin_tb->cbs[PLUGIN_CB_INLINE];
125
+ for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
126
+ struct qemu_plugin_dyn_cb *cb =
127
+ &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
128
+ gen_inline_cb(cb);
129
+ }
130
+ break;
131
+
132
default:
133
g_assert_not_reached();
134
}
135
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
136
enum plugin_gen_cb type = op->args[1];
137
138
switch (from) {
139
- case PLUGIN_GEN_FROM_TB:
140
- {
141
- g_assert(insn_idx == -1);
142
-
143
- switch (type) {
144
- case PLUGIN_GEN_CB_UDATA:
145
- plugin_gen_tb_udata(plugin_tb, op);
146
- break;
147
- case PLUGIN_GEN_CB_UDATA_R:
148
- plugin_gen_tb_udata_r(plugin_tb, op);
149
- break;
150
- case PLUGIN_GEN_CB_INLINE:
151
- plugin_gen_tb_inline(plugin_tb, op);
152
- break;
153
- default:
154
- g_assert_not_reached();
155
- }
156
- break;
157
- }
158
case PLUGIN_GEN_FROM_INSN:
159
{
160
g_assert(insn_idx >= 0);
161
diff --git a/plugins/api.c b/plugins/api.c
162
index XXXXXXX..XXXXXXX 100644
163
--- a/plugins/api.c
164
+++ b/plugins/api.c
165
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
166
void *udata)
167
{
168
if (!tb->mem_only) {
169
- int index = flags == QEMU_PLUGIN_CB_R_REGS ||
170
- flags == QEMU_PLUGIN_CB_RW_REGS ?
171
- PLUGIN_CB_REGULAR_R : PLUGIN_CB_REGULAR;
172
-
173
- plugin_register_dyn_cb__udata(&tb->cbs[index],
174
+ plugin_register_dyn_cb__udata(&tb->cbs[PLUGIN_CB_REGULAR],
175
cb, flags, udata);
176
}
153
}
177
}
154
155
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
156
s_mask |= MAKE_64BIT_MASK(len, 64 - len);
157
ctx->s_mask = s_mask;
158
159
- if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
160
+ if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
161
return true;
162
}
163
164
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
165
ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
166
167
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
168
- ctx->s_mask = smask_from_smask(s_mask);
169
170
return fold_masks(ctx, op);
171
}
178
--
172
--
179
2.34.1
173
2.43.0
1
Introduce a new plugin_cb op and migrate one operation.
By using emit_before_op, we do not need to emit opcodes
early and modify them later -- we can simply emit the
final set of opcodes once.
5
6
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
3
---
9
include/tcg/tcg-op-common.h | 1 +
4
tcg/optimize.c | 9 +++++----
10
include/tcg/tcg-opc.h | 1 +
5
1 file changed, 5 insertions(+), 4 deletions(-)
11
accel/tcg/plugin-gen.c | 74 +++++++++++++++++++++----------------
12
tcg/tcg-op.c | 5 +++
13
4 files changed, 50 insertions(+), 31 deletions(-)
14
6
15
diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
16
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
17
--- a/include/tcg/tcg-op-common.h
9
--- a/tcg/optimize.c
18
+++ b/include/tcg/tcg-op-common.h
10
+++ b/tcg/optimize.c
19
@@ -XXX,XX +XXX,XX @@ void tcg_gen_goto_tb(unsigned idx);
11
@@ -XXX,XX +XXX,XX @@ static void finish_ebb(OptContext *ctx)
20
*/
12
remove_mem_copy_all(ctx);
21
void tcg_gen_lookup_and_goto_ptr(void);
13
}
22
14
23
+void tcg_gen_plugin_cb(unsigned from);
15
-static void finish_folding(OptContext *ctx, TCGOp *op)
24
void tcg_gen_plugin_cb_start(unsigned from, unsigned type, unsigned wr);
16
+static bool finish_folding(OptContext *ctx, TCGOp *op)
25
void tcg_gen_plugin_cb_end(void);
26
27
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
28
index XXXXXXX..XXXXXXX 100644
29
--- a/include/tcg/tcg-opc.h
30
+++ b/include/tcg/tcg-opc.h
31
@@ -XXX,XX +XXX,XX @@ DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
32
DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
33
DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
34
35
+DEF(plugin_cb, 0, 0, 1, TCG_OPF_NOT_PRESENT)
36
DEF(plugin_cb_start, 0, 0, 3, TCG_OPF_NOT_PRESENT)
37
DEF(plugin_cb_end, 0, 0, 0, TCG_OPF_NOT_PRESENT)
38
39
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/accel/tcg/plugin-gen.c
42
+++ b/accel/tcg/plugin-gen.c
43
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_empty_callback(enum plugin_gen_from from)
44
{
17
{
45
switch (from) {
18
const TCGOpDef *def = &tcg_op_defs[op->opc];
46
case PLUGIN_GEN_AFTER_INSN:
19
int i, nb_oargs;
47
- gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
20
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
48
- gen_empty_mem_helper);
21
ts_info(ts)->z_mask = ctx->z_mask;
49
+ tcg_gen_plugin_cb(from);
22
}
50
break;
23
}
51
case PLUGIN_GEN_FROM_INSN:
24
+ return true;
52
/*
53
@@ -XXX,XX +XXX,XX @@ static void inject_mem_enable_helper(struct qemu_plugin_tb *ptb,
54
inject_mem_helper(begin_op, arr);
55
}
25
}
56
26
57
-static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn,
27
/*
58
- TCGOp *begin_op)
28
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
59
-{
29
fold_xi_to_x(ctx, op, 0)) {
60
- if (likely(!plugin_insn->mem_helper)) {
30
return true;
61
- rm_ops(begin_op);
31
}
62
- return;
32
- return false;
63
- }
33
+ return finish_folding(ctx, op);
64
- inject_mem_helper(begin_op, NULL);
65
-}
66
-
67
/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
68
void plugin_gen_disable_mem_helpers(void)
69
{
70
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
71
inject_mem_enable_helper(ptb, insn, begin_op);
72
}
34
}
73
35
74
-static void plugin_gen_disable_mem_helper(struct qemu_plugin_tb *ptb,
36
/* We cannot as yet do_constant_folding with vectors. */
75
- TCGOp *begin_op, int insn_idx)
37
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
76
+static void gen_disable_mem_helper(struct qemu_plugin_tb *ptb,
38
fold_xi_to_x(ctx, op, 0)) {
77
+ struct qemu_plugin_insn *insn)
39
return true;
78
{
40
}
79
- struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
41
- return false;
80
- inject_mem_disable_helper(insn, begin_op);
42
+ return finish_folding(ctx, op);
81
+ if (insn->mem_helper) {
82
+ tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env,
83
+ offsetof(CPUState, plugin_mem_cbs) -
84
+ offsetof(ArchCPU, env));
85
+ }
86
}
43
}
87
44
88
/* #define DEBUG_PLUGIN_GEN_OPS */
45
static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
89
@@ -XXX,XX +XXX,XX @@ static void pr_ops(void)
46
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
90
47
op->args[4] = arg_new_constant(ctx, bl);
91
static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
48
op->args[5] = arg_new_constant(ctx, bh);
92
{
93
- TCGOp *op;
94
+ TCGOp *op, *next;
95
int insn_idx = -1;
96
97
pr_ops();
98
99
- QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
100
+ /*
101
+ * While injecting code, we cannot afford to reuse any ebb temps
102
+ * that might be live within the existing opcode stream.
103
+ * The simplest solution is to release them all and create new.
104
+ */
105
+ memset(tcg_ctx->free_temps, 0, sizeof(tcg_ctx->free_temps));
106
+
107
+ QTAILQ_FOREACH_SAFE(op, &tcg_ctx->ops, link, next) {
108
switch (op->opc) {
109
case INDEX_op_insn_start:
110
insn_idx++;
111
break;
112
+
113
+ case INDEX_op_plugin_cb:
114
+ {
115
+ enum plugin_gen_from from = op->args[0];
116
+ struct qemu_plugin_insn *insn = NULL;
117
+
118
+ if (insn_idx >= 0) {
119
+ insn = g_ptr_array_index(plugin_tb->insns, insn_idx);
120
+ }
121
+
122
+ tcg_ctx->emit_before_op = op;
123
+
124
+ switch (from) {
125
+ case PLUGIN_GEN_AFTER_INSN:
126
+ assert(insn != NULL);
127
+ gen_disable_mem_helper(plugin_tb, insn);
128
+ break;
129
+ default:
130
+ g_assert_not_reached();
131
+ }
132
+
133
+ tcg_ctx->emit_before_op = NULL;
134
+ tcg_op_remove(tcg_ctx, op);
135
+ break;
136
+ }
137
+
138
case INDEX_op_plugin_cb_start:
139
{
140
enum plugin_gen_from from = op->args[0];
141
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
142
143
break;
144
}
145
- case PLUGIN_GEN_AFTER_INSN:
146
- {
147
- g_assert(insn_idx >= 0);
148
-
149
- switch (type) {
150
- case PLUGIN_GEN_DISABLE_MEM_HELPER:
151
- plugin_gen_disable_mem_helper(plugin_tb, op, insn_idx);
152
- break;
153
- default:
154
- g_assert_not_reached();
155
- }
156
- break;
157
- }
158
default:
159
g_assert_not_reached();
160
}
161
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
162
index XXXXXXX..XXXXXXX 100644
163
--- a/tcg/tcg-op.c
164
+++ b/tcg/tcg-op.c
165
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mb(TCGBar mb_type)
166
}
49
}
50
- return false;
51
+ return finish_folding(ctx, op);
167
}
52
}
168
53
169
+void tcg_gen_plugin_cb(unsigned from)
54
static bool fold_add2(OptContext *ctx, TCGOp *op)
170
+{
171
+ tcg_gen_op1(INDEX_op_plugin_cb, from);
172
+}
173
+
174
void tcg_gen_plugin_cb_start(unsigned from, unsigned type, unsigned wr)
175
{
176
tcg_gen_op3(INDEX_op_plugin_cb_start, from, type, wr);
177
--
55
--
178
2.34.1
56
2.43.0
New patch
1
Introduce ti_is_const, ti_const_val, ti_is_const_val.
1
2
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
tcg/optimize.c | 20 +++++++++++++++++---
6
1 file changed, 17 insertions(+), 3 deletions(-)
7
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/optimize.c
11
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ static inline TempOptInfo *arg_info(TCGArg arg)
13
return ts_info(arg_temp(arg));
14
}
15
16
+static inline bool ti_is_const(TempOptInfo *ti)
17
+{
18
+ return ti->is_const;
19
+}
20
+
21
+static inline uint64_t ti_const_val(TempOptInfo *ti)
22
+{
23
+ return ti->val;
24
+}
25
+
26
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
27
+{
28
+ return ti_is_const(ti) && ti_const_val(ti) == val;
29
+}
30
+
31
static inline bool ts_is_const(TCGTemp *ts)
32
{
33
- return ts_info(ts)->is_const;
34
+ return ti_is_const(ts_info(ts));
35
}
36
37
static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
38
{
39
- TempOptInfo *ti = ts_info(ts);
40
- return ti->is_const && ti->val == val;
41
+ return ti_is_const_val(ts_info(ts), val);
42
}
43
44
static inline bool arg_is_const(TCGArg arg)
45
--
46
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
Sink mask computation below fold_affected_mask early exit.
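
As a rough standalone illustration of the mask algebra involved here
(example values only; nothing below is QEMU code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* op1: low 16 bits unknown, upper 48 bits known zero. */
    uint64_t z1 = 0xffff;                    /* a bit can be 1 only where z is 1 */
    uint64_t s1 = 0xffffffffffff0000ull;     /* bits known equal to the msb */

    /* op2: the constant 0x0f0f, so both of its masks are exact. */
    uint64_t z2 = 0x0f0f;
    uint64_t s2 = 0xfffffffffffff800ull;     /* bits 63..11 of 0x0f0f match its msb */

    uint64_t z_mask = z1 & z2;               /* known zeros intersect: 0x0f0f */
    uint64_t s_mask = s1 & s2;               /* sign copies intersect: 0xffffffffffff0000 */

    /* Bits the AND may actually clear: op1 possibly non-zero where op2 is
     * zero.  If this is empty, the AND cannot change op1, which is what
     * fold_affected_mask checks before the masks are computed. */
    uint64_t affected = z1 & ~z2;            /* 0xf0f0, so not a no-op here */

    printf("z %#llx  s %#llx  affected %#llx\n",
           (unsigned long long)z_mask, (unsigned long long)s_mask,
           (unsigned long long)affected);
    return 0;
}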
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 30 ++++++++++++++++--------------
8
1 file changed, 16 insertions(+), 14 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_add2(OptContext *ctx, TCGOp *op)
15
16
static bool fold_and(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1, z2;
19
+ uint64_t z1, z2, z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2_commutative(ctx, op) ||
23
fold_xi_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
- z2 = arg_info(op->args[2])->z_mask;
30
- ctx->z_mask = z1 & z2;
31
-
32
- /*
33
- * Sign repetitions are perforce all identical, whether they are 1 or 0.
34
- * Bitwise operations preserve the relative quantity of the repetitions.
35
- */
36
- ctx->s_mask = arg_info(op->args[1])->s_mask
37
- & arg_info(op->args[2])->s_mask;
38
+ t1 = arg_info(op->args[1]);
39
+ t2 = arg_info(op->args[2]);
40
+ z1 = t1->z_mask;
41
+ z2 = t2->z_mask;
42
43
/*
44
* Known-zeros does not imply known-ones. Therefore unless
45
* arg2 is constant, we can't infer affected bits from it.
46
*/
47
- if (arg_is_const(op->args[2]) &&
48
- fold_affected_mask(ctx, op, z1 & ~z2)) {
49
+ if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
50
return true;
51
}
52
53
- return fold_masks(ctx, op);
54
+ z_mask = z1 & z2;
55
+
56
+ /*
57
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
58
+ * Bitwise operations preserve the relative quantity of the repetitions.
59
+ */
60
+ s_mask = t1->s_mask & t2->s_mask;
61
+
62
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
63
}
64
65
static bool fold_andc(OptContext *ctx, TCGOp *op)
66
--
67
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
Avoid double inversion of the value of second const operand.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 21 +++++++++++----------
8
1 file changed, 11 insertions(+), 10 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
15
16
static bool fold_andc(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1;
19
+ uint64_t z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2(ctx, op) ||
23
fold_xx_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ t2 = arg_info(op->args[2]);
31
+ z_mask = t1->z_mask;
32
33
/*
34
* Known-zeros does not imply known-ones. Therefore unless
35
* arg2 is constant, we can't infer anything from it.
36
*/
37
- if (arg_is_const(op->args[2])) {
38
- uint64_t z2 = ~arg_info(op->args[2])->z_mask;
39
- if (fold_affected_mask(ctx, op, z1 & ~z2)) {
40
+ if (ti_is_const(t2)) {
41
+ uint64_t v2 = ti_const_val(t2);
42
+ if (fold_affected_mask(ctx, op, z_mask & v2)) {
43
return true;
44
}
45
- z1 &= z2;
46
+ z_mask &= ~v2;
47
}
48
- ctx->z_mask = z1;
49
50
- ctx->s_mask = arg_info(op->args[1])->s_mask
51
- & arg_info(op->args[2])->s_mask;
52
- return fold_masks(ctx, op);
53
+ s_mask = t1->s_mask & t2->s_mask;
54
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
55
}
56
57
static bool fold_brcond(OptContext *ctx, TCGOp *op)
58
--
59
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
Always set s_mask along the BSWAP_OS path, since the result is
being explicitly sign-extended.
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 21 ++++++++++-----------
9
1 file changed, 10 insertions(+), 11 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
16
static bool fold_bswap(OptContext *ctx, TCGOp *op)
17
{
18
uint64_t z_mask, s_mask, sign;
19
+ TempOptInfo *t1 = arg_info(op->args[1]);
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t = arg_info(op->args[1])->val;
23
-
24
- t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
25
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
26
+ if (ti_is_const(t1)) {
27
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
28
+ do_constant_folding(op->opc, ctx->type,
29
+ ti_const_val(t1),
30
+ op->args[2]));
31
}
32
33
- z_mask = arg_info(op->args[1])->z_mask;
34
-
35
+ z_mask = t1->z_mask;
36
switch (op->opc) {
37
case INDEX_op_bswap16_i32:
38
case INDEX_op_bswap16_i64:
39
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
40
/* If the sign bit may be 1, force all the bits above to 1. */
41
if (z_mask & sign) {
42
z_mask |= sign;
43
- s_mask = sign << 1;
44
}
45
+ /* The value and therefore s_mask is explicitly sign-extended. */
46
+ s_mask = sign;
47
break;
48
default:
49
/* The high bits are undefined: force all bits above the sign to 1. */
50
z_mask |= sign << 1;
51
break;
52
}
53
- ctx->z_mask = z_mask;
54
- ctx->s_mask = s_mask;
55
56
- return fold_masks(ctx, op);
57
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
58
}
59
60
static bool fold_call(OptContext *ctx, TCGOp *op)
61
--
62
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
Compute s_mask from the union of the maximum count and the
op2 fallback for op1 being zero.
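
A standalone sketch of the resulting masks, assuming a 64-bit clz whose
op2 fallback is the constant 64 (so the count for a non-zero input is at
most 63); the values are illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Maximum count for a non-zero 64-bit input. */
    uint64_t z_mask = 63;
    /* Everything above the count is known zero, i.e. matches the msb. */
    uint64_t s_mask = ~z_mask;

    /* op2 fallback (returned when op1 is zero): the constant 64. */
    uint64_t z2 = 64;
    uint64_t s2 = 0xffffffffffffff80ull;   /* bits 63..7 of 64 match its msb */

    z_mask |= z2;                          /* 0x7f */
    s_mask &= s2;                          /* 0xffffffffffffff80 */

    printf("z_mask %#llx  s_mask %#llx\n",
           (unsigned long long)z_mask, (unsigned long long)s_mask);
    return 0;
}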
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 15 ++++++++++-----
9
1 file changed, 10 insertions(+), 5 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
16
17
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t z_mask;
20
+ uint64_t z_mask, s_mask;
21
+ TempOptInfo *t1 = arg_info(op->args[1]);
22
+ TempOptInfo *t2 = arg_info(op->args[2]);
23
24
- if (arg_is_const(op->args[1])) {
25
- uint64_t t = arg_info(op->args[1])->val;
26
+ if (ti_is_const(t1)) {
27
+ uint64_t t = ti_const_val(t1);
28
29
if (t != 0) {
30
t = do_constant_folding(op->opc, ctx->type, t, 0);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
32
default:
33
g_assert_not_reached();
34
}
35
- ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
36
- return false;
37
+ s_mask = ~z_mask;
38
+ z_mask |= t2->z_mask;
39
+ s_mask &= t2->s_mask;
40
+
41
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
42
}
43
44
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
New patch
1
Add fold_masks_z as a trivial wrapper around fold_masks_zs.
Avoid the use of the OptContext slots.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
15
return true;
16
}
17
18
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, z_mask, 0);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
27
28
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask;
31
+
32
if (fold_const1(ctx, op)) {
33
return true;
34
}
35
36
switch (ctx->type) {
37
case TCG_TYPE_I32:
38
- ctx->z_mask = 32 | 31;
39
+ z_mask = 32 | 31;
40
break;
41
case TCG_TYPE_I64:
42
- ctx->z_mask = 64 | 63;
43
+ z_mask = 64 | 63;
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_z(ctx, op, z_mask);
50
}
51
52
static bool fold_deposit(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
When we fold to and, use fold_and.
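
A standalone check of the two deposit-to-and rewrites used below
(deposit64 is open-coded here so the sketch is self-contained; it mirrors
QEMU's bitops helper):

#include <stdint.h>
#include <assert.h>
#include <stdio.h>

static uint64_t deposit64(uint64_t val, int start, int len, uint64_t field)
{
    uint64_t mask = (~0ull >> (64 - len)) << start;
    return (val & ~mask) | ((field << start) & mask);
}

int main(void)
{
    uint64_t x = 0x123456789abcdef0ull;

    /* Inserting a value into zero at offset 0 is an AND with a low mask. */
    assert(deposit64(0, 0, 16, x) == (x & 0xffff));

    /* Inserting zero into a value is an AND with the complementary mask. */
    assert(deposit64(x, 8, 8, 0) == (x & deposit64(-1ull, 8, 8, 0)));

    puts("both deposit-to-and rewrites hold");
    return 0;
}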
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 35 +++++++++++++++++------------------
8
1 file changed, 17 insertions(+), 18 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
15
16
static bool fold_deposit(OptContext *ctx, TCGOp *op)
17
{
18
+ TempOptInfo *t1 = arg_info(op->args[1]);
19
+ TempOptInfo *t2 = arg_info(op->args[2]);
20
+ int ofs = op->args[3];
21
+ int len = op->args[4];
22
TCGOpcode and_opc;
23
+ uint64_t z_mask;
24
25
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
26
- uint64_t t1 = arg_info(op->args[1])->val;
27
- uint64_t t2 = arg_info(op->args[2])->val;
28
-
29
- t1 = deposit64(t1, op->args[3], op->args[4], t2);
30
- return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
31
+ if (ti_is_const(t1) && ti_is_const(t2)) {
32
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
33
+ deposit64(ti_const_val(t1), ofs, len,
34
+ ti_const_val(t2)));
35
}
36
37
switch (ctx->type) {
38
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
39
}
40
41
/* Inserting a value into zero at offset 0. */
42
- if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
43
- uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
44
+ if (ti_is_const_val(t1, 0) && ofs == 0) {
45
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
46
47
op->opc = and_opc;
48
op->args[1] = op->args[2];
49
op->args[2] = arg_new_constant(ctx, mask);
50
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
51
- return false;
52
+ return fold_and(ctx, op);
53
}
54
55
/* Inserting zero into a value. */
56
- if (arg_is_const_val(op->args[2], 0)) {
57
- uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
58
+ if (ti_is_const_val(t2, 0)) {
59
+ uint64_t mask = deposit64(-1, ofs, len, 0);
60
61
op->opc = and_opc;
62
op->args[2] = arg_new_constant(ctx, mask);
63
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
64
- return false;
65
+ return fold_and(ctx, op);
66
}
67
68
- ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
69
- op->args[3], op->args[4],
70
- arg_info(op->args[2])->z_mask);
71
- return false;
72
+ z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
73
+ return fold_masks_z(ctx, op, z_mask);
74
}
75
76
static bool fold_divide(OptContext *ctx, TCGOp *op)
77
--
78
2.43.0
New patch
1
The input which overlaps the sign bit of the output can
have its input s_mask propagated to the output s_mask.
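
A standalone sketch of the two cases for a 64-bit deposit, using made-up
input masks (nothing here is taken from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t t1_s = 0xffffffffff000000ull;  /* t1: bits 63..24 match t1's msb */
    uint64_t t2_s = 0xffffffffffff0000ull;  /* t2: bits 63..16 match t2's msb */
    uint64_t s_mask;
    int ofs, len;

    /* Field ends at the sign bit (ofs + len == 64): the output msb is a
     * bit of t2, so t2's sign-matching bits simply shift into place. */
    ofs = 32, len = 32;
    s_mask = t2_s << ofs;                   /* 0xffff000000000000 */
    printf("ofs+len==64: %#llx\n", (unsigned long long)s_mask);

    /* Otherwise the untouched high part still matches t1's msb, but only
     * above the deposited field. */
    ofs = 0, len = 16;
    s_mask = t1_s & ~((1ull << (ofs + len)) - 1);
    printf("otherwise:   %#llx\n", (unsigned long long)s_mask);
    return 0;
}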
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 14 ++++++++++++--
8
1 file changed, 12 insertions(+), 2 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
15
TempOptInfo *t2 = arg_info(op->args[2]);
16
int ofs = op->args[3];
17
int len = op->args[4];
18
+ int width;
19
TCGOpcode and_opc;
20
- uint64_t z_mask;
21
+ uint64_t z_mask, s_mask;
22
23
if (ti_is_const(t1) && ti_is_const(t2)) {
24
return tcg_opt_gen_movi(ctx, op, op->args[0],
25
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
26
switch (ctx->type) {
27
case TCG_TYPE_I32:
28
and_opc = INDEX_op_and_i32;
29
+ width = 32;
30
break;
31
case TCG_TYPE_I64:
32
and_opc = INDEX_op_and_i64;
33
+ width = 64;
34
break;
35
default:
36
g_assert_not_reached();
37
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
38
return fold_and(ctx, op);
39
}
40
41
+ /* The s_mask from the top portion of the deposit is still valid. */
42
+ if (ofs + len == width) {
43
+ s_mask = t2->s_mask << ofs;
44
+ } else {
45
+ s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
46
+ }
47
+
48
z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
49
- return fold_masks_z(ctx, op, z_mask);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_divide(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 4 ++--
5
1 file changed, 2 insertions(+), 2 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
12
t = dup_const(TCGOP_VECE(op), t);
13
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup2(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
21
op->opc = INDEX_op_dup_vec;
22
TCGOP_VECE(op) = MO_32;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
26
}
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
--
30
2.43.0
New patch
1
Add fold_masks_s as a trivial wrapper around fold_masks_zs.
Avoid the use of the OptContext slots.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
15
return fold_masks_zs(ctx, op, z_mask, 0);
16
}
17
18
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, -1, s_mask);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t s_mask;
31
+
32
if (fold_const2_commutative(ctx, op) ||
33
fold_xi_to_x(ctx, op, -1) ||
34
fold_xi_to_not(ctx, op, 0)) {
35
return true;
36
}
37
38
- ctx->s_mask = arg_info(op->args[1])->s_mask
39
- & arg_info(op->args[2])->s_mask;
40
- return false;
41
+ s_mask = arg_info(op->args[1])->s_mask
42
+ & arg_info(op->args[2])->s_mask;
43
+ return fold_masks_s(ctx, op, s_mask);
44
}
45
46
static bool fold_extract(OptContext *ctx, TCGOp *op)
47
--
48
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 15 ++++++---------
7
1 file changed, 6 insertions(+), 9 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
14
static bool fold_extract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask_old, z_mask;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = extract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ extract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask_old = arg_info(op->args[1])->z_mask;
33
+ z_mask_old = t1->z_mask;
34
z_mask = extract64(z_mask_old, pos, len);
35
if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
36
return true;
37
}
38
- ctx->z_mask = z_mask;
39
40
- return fold_masks(ctx, op);
41
+ return fold_masks_z(ctx, op, z_mask);
42
}
43
44
static bool fold_extract2(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
12
}
13
return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_exts(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
Explicitly sign-extend z_mask instead of doing that manually.
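
A standalone illustration of why the cast alone is enough, using ext8s
as the example (the cast behaves the same way the patch relies on):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Bit 7 of the source known zero: the extended result keeps the
     * high bits known zero. */
    uint64_t z = 0x7f;
    printf("%#llx\n", (unsigned long long)(uint64_t)(int8_t)z);  /* 0x7f */

    /* Bit 7 possibly one: every copy of the sign may be one. */
    z = 0xff;
    printf("%#llx\n", (unsigned long long)(uint64_t)(int8_t)z);  /* 0xffffffffffffffff */
    return 0;
}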
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 29 ++++++++++++-----------------
8
1 file changed, 12 insertions(+), 17 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
15
16
static bool fold_exts(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t s_mask_old, s_mask, z_mask, sign;
19
+ uint64_t s_mask_old, s_mask, z_mask;
20
bool type_change = false;
21
+ TempOptInfo *t1;
22
23
if (fold_const1(ctx, op)) {
24
return true;
25
}
26
27
- z_mask = arg_info(op->args[1])->z_mask;
28
- s_mask = arg_info(op->args[1])->s_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ z_mask = t1->z_mask;
31
+ s_mask = t1->s_mask;
32
s_mask_old = s_mask;
33
34
switch (op->opc) {
35
CASE_OP_32_64(ext8s):
36
- sign = INT8_MIN;
37
- z_mask = (uint8_t)z_mask;
38
+ s_mask |= INT8_MIN;
39
+ z_mask = (int8_t)z_mask;
40
break;
41
CASE_OP_32_64(ext16s):
42
- sign = INT16_MIN;
43
- z_mask = (uint16_t)z_mask;
44
+ s_mask |= INT16_MIN;
45
+ z_mask = (int16_t)z_mask;
46
break;
47
case INDEX_op_ext_i32_i64:
48
type_change = true;
49
QEMU_FALLTHROUGH;
50
case INDEX_op_ext32s_i64:
51
- sign = INT32_MIN;
52
- z_mask = (uint32_t)z_mask;
53
+ s_mask |= INT32_MIN;
54
+ z_mask = (int32_t)z_mask;
55
break;
56
default:
57
g_assert_not_reached();
58
}
59
60
- if (z_mask & sign) {
61
- z_mask |= sign;
62
- }
63
- s_mask |= sign << 1;
64
-
65
- ctx->z_mask = z_mask;
66
- ctx->s_mask = s_mask;
67
if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
68
return true;
69
}
70
71
- return fold_masks(ctx, op);
72
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
73
}
74
75
static bool fold_extu(OptContext *ctx, TCGOp *op)
76
--
77
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 4 ++--
7
1 file changed, 2 insertions(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
14
g_assert_not_reached();
15
}
16
17
- ctx->z_mask = z_mask;
18
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
19
return true;
20
}
21
- return fold_masks(ctx, op);
22
+
23
+ return fold_masks_z(ctx, op, z_mask);
24
}
25
26
static bool fold_mb(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 19 +++++++++++--------
7
1 file changed, 11 insertions(+), 8 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
14
15
static bool fold_movcond(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *tt, *ft;
19
int i;
20
21
/* If true and false values are the same, eliminate the cmp. */
22
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
23
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
24
}
25
26
- ctx->z_mask = arg_info(op->args[3])->z_mask
27
- | arg_info(op->args[4])->z_mask;
28
- ctx->s_mask = arg_info(op->args[3])->s_mask
29
- & arg_info(op->args[4])->s_mask;
30
+ tt = arg_info(op->args[3]);
31
+ ft = arg_info(op->args[4]);
32
+ z_mask = tt->z_mask | ft->z_mask;
33
+ s_mask = tt->s_mask & ft->s_mask;
34
35
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
36
- uint64_t tv = arg_info(op->args[3])->val;
37
- uint64_t fv = arg_info(op->args[4])->val;
38
+ if (ti_is_const(tt) && ti_is_const(ft)) {
39
+ uint64_t tv = ti_const_val(tt);
40
+ uint64_t fv = ti_const_val(ft);
41
TCGOpcode opc, negopc = 0;
42
TCGCond cond = op->args[5];
43
44
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
45
}
46
}
47
}
48
- return false;
49
+
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_mul(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
1
Delay test of plugin_tb->mem_helper until the inject pass.
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
3
---
6
accel/tcg/plugin-gen.c | 37 ++++++++++++++++---------------------
4
tcg/optimize.c | 6 +++---
7
1 file changed, 16 insertions(+), 21 deletions(-)
5
1 file changed, 3 insertions(+), 3 deletions(-)
8
6
9
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
11
--- a/accel/tcg/plugin-gen.c
9
--- a/tcg/optimize.c
12
+++ b/accel/tcg/plugin-gen.c
10
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ enum plugin_gen_from {
11
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
14
PLUGIN_GEN_FROM_INSN,
12
fold_xi_to_x(ctx, op, 1)) {
15
PLUGIN_GEN_FROM_MEM,
13
return true;
16
PLUGIN_GEN_AFTER_INSN,
17
+ PLUGIN_GEN_AFTER_TB,
18
PLUGIN_GEN_N_FROMS,
19
};
20
21
@@ -XXX,XX +XXX,XX @@ static void inject_mem_enable_helper(struct qemu_plugin_tb *ptb,
22
/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
23
void plugin_gen_disable_mem_helpers(void)
24
{
25
- /*
26
- * We could emit the clearing unconditionally and be done. However, this can
27
- * be wasteful if for instance plugins don't track memory accesses, or if
28
- * most TBs don't use helpers. Instead, emit the clearing iff the TB calls
29
- * helpers that might access guest memory.
30
- *
31
- * Note: we do not reset plugin_tb->mem_helper here; a TB might have several
32
- * exit points, and we want to emit the clearing from all of them.
33
- */
34
- if (!tcg_ctx->plugin_tb->mem_helper) {
35
- return;
36
+ if (tcg_ctx->plugin_insn) {
37
+ tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_TB);
38
}
14
}
39
- tcg_gen_st_ptr(tcg_constant_ptr(NULL), tcg_env,
15
- return false;
40
- offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env));
16
+ return finish_folding(ctx, op);
41
}
17
}
42
18
43
static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
19
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
44
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
20
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
45
inject_mem_enable_helper(ptb, insn, begin_op);
21
fold_xi_to_i(ctx, op, 0)) {
22
return true;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
46
}
26
}
47
27
48
-static void gen_disable_mem_helper(struct qemu_plugin_tb *ptb,
28
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
49
- struct qemu_plugin_insn *insn)
29
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
50
+static void gen_disable_mem_helper(void)
30
tcg_opt_gen_movi(ctx, op2, rh, h);
51
{
31
return true;
52
- if (insn->mem_helper) {
32
}
53
- tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env,
33
- return false;
54
- offsetof(CPUState, plugin_mem_cbs) -
34
+ return finish_folding(ctx, op);
55
- offsetof(ArchCPU, env));
56
- }
57
+ tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env,
58
+ offsetof(CPUState, plugin_mem_cbs) -
59
+ offsetof(ArchCPU, env));
60
}
35
}
61
36
62
static void gen_udata_cb(struct qemu_plugin_dyn_cb *cb)
37
static bool fold_nand(OptContext *ctx, TCGOp *op)
63
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
64
tcg_ctx->emit_before_op = op;
65
66
switch (from) {
67
+ case PLUGIN_GEN_AFTER_TB:
68
+ if (plugin_tb->mem_helper) {
69
+ gen_disable_mem_helper();
70
+ }
71
+ break;
72
+
73
case PLUGIN_GEN_AFTER_INSN:
74
assert(insn != NULL);
75
- gen_disable_mem_helper(plugin_tb, insn);
76
+ if (insn->mem_helper) {
77
+ gen_disable_mem_helper();
78
+ }
79
break;
80
81
case PLUGIN_GEN_FROM_TB:
82
--
38
--
83
2.34.1
39
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nand(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, -1)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 9 ++-------
7
1 file changed, 2 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
14
{
15
/* Set to 1 all bits to the left of the rightmost. */
16
uint64_t z_mask = arg_info(op->args[1])->z_mask;
17
- ctx->z_mask = -(z_mask & -z_mask);
18
+ z_mask = -(z_mask & -z_mask);
19
20
- /*
21
- * Because of fold_sub_to_neg, we want to always return true,
22
- * via finish_folding.
23
- */
24
- finish_folding(ctx, op);
25
- return true;
26
+ return fold_masks_z(ctx, op, z_mask);
27
}
28
29
static bool fold_neg(OptContext *ctx, TCGOp *op)
30
--
31
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nor(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, 0)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_not(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 7 +------
7
1 file changed, 1 insertion(+), 6 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
if (fold_const1(ctx, op)) {
15
return true;
16
}
17
-
18
- ctx->s_mask = arg_info(op->args[1])->s_mask;
19
-
20
- /* Because of fold_to_not, we want to always return true, via finish. */
21
- finish_folding(ctx, op);
22
- return true;
23
+ return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
24
}
25
26
static bool fold_or(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 ++++++++-----
7
1 file changed, 8 insertions(+), 5 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
15
static bool fold_or(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *t1, *t2;
19
+
20
if (fold_const2_commutative(ctx, op) ||
21
fold_xi_to_x(ctx, op, 0) ||
22
fold_xx_to_x(ctx, op)) {
23
return true;
24
}
25
26
- ctx->z_mask = arg_info(op->args[1])->z_mask
27
- | arg_info(op->args[2])->z_mask;
28
- ctx->s_mask = arg_info(op->args[1])->s_mask
29
- & arg_info(op->args[2])->s_mask;
30
- return fold_masks(ctx, op);
31
+ t1 = arg_info(op->args[1]);
32
+ t2 = arg_info(op->args[2]);
33
+ z_mask = t1->z_mask | t2->z_mask;
34
+ s_mask = t1->s_mask & t2->s_mask;
35
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
36
}
37
38
static bool fold_orc(OptContext *ctx, TCGOp *op)
39
--
40
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
14
15
static bool fold_orc(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2(ctx, op) ||
20
fold_xx_to_i(ctx, op, -1) ||
21
fold_xi_to_x(ctx, op, -1) ||
22
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
23
return true;
24
}
25
26
- ctx->s_mask = arg_info(op->args[1])->s_mask
27
- & arg_info(op->args[2])->s_mask;
28
- return false;
29
+ s_mask = arg_info(op->args[1])->s_mask
30
+ & arg_info(op->args[2])->s_mask;
31
+ return fold_masks_s(ctx, op, s_mask);
32
}
33
34
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
35
--
36
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Be careful not to call fold_masks_zs when the memory operation
is wide enough to require multiple outputs, so split into two
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.
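
For example, a 16-bit load yields these masks (standalone sketch;
MAKE_64BIT_MASK is open-coded, and MO_SW/MO_UW refer to the signed and
unsigned 16-bit memory operations):

#include <stdint.h>
#include <stdio.h>

static uint64_t make_64bit_mask(int shift, int length)
{
    return (~0ull >> (64 - length)) << shift;
}

int main(void)
{
    int width = 16;

    /* MO_SW: bits 15..63 of the result all equal the loaded sign bit,
     * and the new s_mask form includes the sign bit itself. */
    uint64_t s_mask = make_64bit_mask(width - 1, 64 - (width - 1));
    /* MO_UW: bits 16..63 of the result are known zero. */
    uint64_t z_mask = make_64bit_mask(0, width);

    printf("s_mask %#llx  z_mask %#llx\n",
           (unsigned long long)s_mask, (unsigned long long)z_mask);
    return 0;
}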
6
7
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
10
tcg/optimize.c | 26 +++++++++++++++++++++-----
11
1 file changed, 21 insertions(+), 5 deletions(-)
12
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/optimize.c
16
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
18
return fold_masks_s(ctx, op, s_mask);
19
}
20
21
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
22
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
23
{
24
const TCGOpDef *def = &tcg_op_defs[op->opc];
25
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
26
MemOp mop = get_memop(oi);
27
int width = 8 * memop_size(mop);
28
+ uint64_t z_mask = -1, s_mask = 0;
29
30
if (width < 64) {
31
if (mop & MO_SIGN) {
32
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
33
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
34
} else {
35
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
36
+ z_mask = MAKE_64BIT_MASK(0, width);
37
}
38
}
39
40
/* Opcodes that touch guest memory stop the mb optimization. */
41
ctx->prev_mb = NULL;
42
- return false;
43
+
44
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
45
+}
46
+
47
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
48
+{
49
+ /* Opcodes that touch guest memory stop the mb optimization. */
50
+ ctx->prev_mb = NULL;
51
+ return finish_folding(ctx, op);
52
}
53
54
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
55
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
56
break;
57
case INDEX_op_qemu_ld_a32_i32:
58
case INDEX_op_qemu_ld_a64_i32:
59
+ done = fold_qemu_ld_1reg(&ctx, op);
60
+ break;
61
case INDEX_op_qemu_ld_a32_i64:
62
case INDEX_op_qemu_ld_a64_i64:
63
+ if (TCG_TARGET_REG_BITS == 64) {
64
+ done = fold_qemu_ld_1reg(&ctx, op);
65
+ break;
66
+ }
67
+ QEMU_FALLTHROUGH;
68
case INDEX_op_qemu_ld_a32_i128:
69
case INDEX_op_qemu_ld_a64_i128:
70
- done = fold_qemu_ld(&ctx, op);
71
+ done = fold_qemu_ld_2reg(&ctx, op);
72
break;
73
case INDEX_op_qemu_st8_a32_i32:
74
case INDEX_op_qemu_st8_a64_i32:
75
--
76
2.43.0
1
We have qemu_plugin_dyn_cb.type to differentiate the various
callback types, so we do not need to keep them in separate queues.

Stores have no output operands, and so need no further work.
3
2
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
include/qemu/plugin.h | 35 ++++++----------
6
tcg/optimize.c | 11 +++++------
8
accel/tcg/plugin-gen.c | 90 ++++++++++++++++++++++--------------------
7
1 file changed, 5 insertions(+), 6 deletions(-)
9
plugins/api.c | 18 +++------
10
3 files changed, 65 insertions(+), 78 deletions(-)
11
8
12
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
14
--- a/include/qemu/plugin.h
11
--- a/tcg/optimize.c
15
+++ b/include/qemu/plugin.h
12
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ union qemu_plugin_cb_sig {
13
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
17
};
18
19
enum plugin_dyn_cb_type {
20
- PLUGIN_CB_INSN,
21
- PLUGIN_CB_MEM,
22
- PLUGIN_N_CB_TYPES,
23
-};
24
-
25
-enum plugin_dyn_cb_subtype {
26
PLUGIN_CB_REGULAR,
27
PLUGIN_CB_INLINE,
28
- PLUGIN_N_CB_SUBTYPES,
29
};
30
31
/*
32
@@ -XXX,XX +XXX,XX @@ enum plugin_dyn_cb_subtype {
33
*/
34
struct qemu_plugin_dyn_cb {
35
void *userp;
36
- enum plugin_dyn_cb_subtype type;
37
+ enum plugin_dyn_cb_type type;
38
/* @rw applies to mem callbacks only (both regular and inline) */
39
enum qemu_plugin_mem_rw rw;
40
/* fields specific to each dyn_cb type go here */
41
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_insn {
42
GByteArray *data;
43
uint64_t vaddr;
44
void *haddr;
45
- GArray *cbs[PLUGIN_N_CB_TYPES][PLUGIN_N_CB_SUBTYPES];
46
+ GArray *insn_cbs;
47
+ GArray *mem_cbs;
48
bool calls_helpers;
49
50
/* if set, the instruction calls helpers that might access guest memory */
51
@@ -XXX,XX +XXX,XX @@ static inline void qemu_plugin_insn_cleanup_fn(gpointer data)
52
53
static inline struct qemu_plugin_insn *qemu_plugin_insn_alloc(void)
54
{
14
{
55
- int i, j;
15
/* Opcodes that touch guest memory stop the mb optimization. */
56
struct qemu_plugin_insn *insn = g_new0(struct qemu_plugin_insn, 1);
16
ctx->prev_mb = NULL;
57
- insn->data = g_byte_array_sized_new(4);
17
- return false;
58
18
+ return true;
59
- for (i = 0; i < PLUGIN_N_CB_TYPES; i++) {
60
- for (j = 0; j < PLUGIN_N_CB_SUBTYPES; j++) {
61
- insn->cbs[i][j] = g_array_new(false, false,
62
- sizeof(struct qemu_plugin_dyn_cb));
63
- }
64
- }
65
+ insn->data = g_byte_array_sized_new(4);
66
return insn;
67
}
19
}
68
20
69
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_tb {
21
static bool fold_remainder(OptContext *ctx, TCGOp *op)
70
/* if set, the TB calls helpers that might access guest memory */
22
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
71
bool mem_helper;
23
72
24
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
73
- GArray *cbs[PLUGIN_N_CB_SUBTYPES];
25
remove_mem_copy_all(ctx);
74
+ GArray *cbs;
26
- return false;
75
};
27
+ return true;
76
77
/**
78
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb,
79
uint64_t pc)
80
{
81
struct qemu_plugin_insn *insn;
82
- int i, j;
83
84
if (unlikely(tb->n == tb->insns->len)) {
85
struct qemu_plugin_insn *new_insn = qemu_plugin_insn_alloc();
86
g_ptr_array_add(tb->insns, new_insn);
87
}
28
}
88
+
29
89
insn = g_ptr_array_index(tb->insns, tb->n++);
30
switch (op->opc) {
90
g_byte_array_set_size(insn->data, 0);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
91
insn->calls_helpers = false;
32
g_assert_not_reached();
92
insn->mem_helper = false;
93
insn->vaddr = pc;
94
-
95
- for (i = 0; i < PLUGIN_N_CB_TYPES; i++) {
96
- for (j = 0; j < PLUGIN_N_CB_SUBTYPES; j++) {
97
- g_array_set_size(insn->cbs[i][j], 0);
98
- }
99
+ if (insn->insn_cbs) {
100
+ g_array_set_size(insn->insn_cbs, 0);
101
+ }
102
+ if (insn->mem_cbs) {
103
+ g_array_set_size(insn->mem_cbs, 0);
104
}
33
}
105
34
remove_mem_copy_in(ctx, ofs, ofs + lm1);
106
return insn;
35
- return false;
107
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
36
+ return true;
108
index XXXXXXX..XXXXXXX 100644
37
}
109
--- a/accel/tcg/plugin-gen.c
38
110
+++ b/accel/tcg/plugin-gen.c
39
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
111
@@ -XXX,XX +XXX,XX @@ void plugin_gen_disable_mem_helpers(void)
40
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
112
static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
41
TCGType type;
113
struct qemu_plugin_insn *insn)
42
114
{
43
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
115
- GArray *cbs[2];
44
- fold_tcg_st(ctx, op);
116
GArray *arr;
45
- return false;
117
- size_t n_cbs;
46
+ return fold_tcg_st(ctx, op);
118
+ size_t len;
119
120
/*
121
* Tracking memory accesses performed from helpers requires extra work.
122
@@ -XXX,XX +XXX,XX @@ static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
123
return;
124
}
47
}
125
48
126
- cbs[0] = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
49
src = arg_temp(op->args[0]);
127
- cbs[1] = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
50
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
128
- n_cbs = cbs[0]->len + cbs[1]->len;
51
last = ofs + tcg_type_size(type) - 1;
129
-
52
remove_mem_copy_in(ctx, ofs, last);
130
- if (n_cbs == 0) {
53
record_mem_copy(ctx, type, src, ofs, last);
131
+ if (!insn->mem_cbs || !insn->mem_cbs->len) {
54
- return false;
132
insn->mem_helper = false;
55
+ return true;
133
return;
134
}
135
insn->mem_helper = true;
136
ptb->mem_helper = true;
137
138
+ /*
139
+ * TODO: It seems like we should be able to use ref/unref
140
+ * to avoid needing to actually copy this array.
141
+ * Alternately, perhaps we could allocate new memory adjacent
142
+ * to the TranslationBlock itself, so that we do not have to
143
+ * actively manage the lifetime after this.
144
+ */
145
+ len = insn->mem_cbs->len;
146
arr = g_array_sized_new(false, false,
147
- sizeof(struct qemu_plugin_dyn_cb), n_cbs);
148
- g_array_append_vals(arr, cbs[0]->data, cbs[0]->len);
149
- g_array_append_vals(arr, cbs[1]->data, cbs[1]->len);
150
-
151
+ sizeof(struct qemu_plugin_dyn_cb), len);
152
+ memcpy(arr->data, insn->mem_cbs->data,
153
+ len * sizeof(struct qemu_plugin_dyn_cb));
154
qemu_plugin_add_dyn_cb_arr(arr);
155
156
tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env,
157
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
158
case PLUGIN_GEN_FROM_TB:
159
assert(insn == NULL);
160
161
- cbs = plugin_tb->cbs[PLUGIN_CB_REGULAR];
162
+ cbs = plugin_tb->cbs;
163
for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
164
struct qemu_plugin_dyn_cb *cb =
165
&g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
166
- gen_udata_cb(cb);
167
- }
168
169
- cbs = plugin_tb->cbs[PLUGIN_CB_INLINE];
170
- for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
171
- struct qemu_plugin_dyn_cb *cb =
172
- &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
173
- gen_inline_cb(cb);
174
+ switch (cb->type) {
175
+ case PLUGIN_CB_REGULAR:
176
+ gen_udata_cb(cb);
177
+ break;
178
+ case PLUGIN_CB_INLINE:
179
+ gen_inline_cb(cb);
180
+ break;
181
+ default:
182
+ g_assert_not_reached();
183
+ }
184
}
185
break;
186
187
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
188
189
gen_enable_mem_helper(plugin_tb, insn);
190
191
- cbs = insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR];
192
+ cbs = insn->insn_cbs;
193
for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
194
struct qemu_plugin_dyn_cb *cb =
195
&g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
196
- gen_udata_cb(cb);
197
- }
198
199
- cbs = insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE];
200
- for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
201
- struct qemu_plugin_dyn_cb *cb =
202
- &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
203
- gen_inline_cb(cb);
204
+ switch (cb->type) {
205
+ case PLUGIN_CB_REGULAR:
206
+ gen_udata_cb(cb);
207
+ break;
208
+ case PLUGIN_CB_INLINE:
209
+ gen_inline_cb(cb);
210
+ break;
211
+ default:
212
+ g_assert_not_reached();
213
+ }
214
}
215
break;
216
217
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
218
219
tcg_ctx->emit_before_op = op;
220
221
- cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
222
+ cbs = insn->mem_cbs;
223
for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
224
struct qemu_plugin_dyn_cb *cb =
225
&g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
226
- if (cb->rw & rw) {
227
- gen_mem_cb(cb, meminfo, addr);
228
- }
229
- }
230
231
- cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
232
- for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
233
- struct qemu_plugin_dyn_cb *cb =
234
- &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
235
if (cb->rw & rw) {
236
- gen_inline_cb(cb);
237
+ switch (cb->type) {
238
+ case PLUGIN_CB_REGULAR:
239
+ gen_mem_cb(cb, meminfo, addr);
240
+ break;
241
+ case PLUGIN_CB_INLINE:
242
+ gen_inline_cb(cb);
243
+ break;
244
+ default:
245
+ g_assert_not_reached();
246
+ }
247
}
248
}
249
250
@@ -XXX,XX +XXX,XX @@ bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
251
252
if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_state->event_mask)) {
253
struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
254
- int i;
255
256
/* reset callbacks */
257
- for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) {
258
- if (ptb->cbs[i]) {
259
- g_array_set_size(ptb->cbs[i], 0);
260
- }
261
+ if (ptb->cbs) {
262
+ g_array_set_size(ptb->cbs, 0);
263
}
264
ptb->n = 0;
265
266
diff --git a/plugins/api.c b/plugins/api.c
267
index XXXXXXX..XXXXXXX 100644
268
--- a/plugins/api.c
269
+++ b/plugins/api.c
270
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
271
void *udata)
272
{
273
if (!tb->mem_only) {
274
- plugin_register_dyn_cb__udata(&tb->cbs[PLUGIN_CB_REGULAR],
275
- cb, flags, udata);
276
+ plugin_register_dyn_cb__udata(&tb->cbs, cb, flags, udata);
277
}
278
}
56
}
279
57
280
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
58
static bool fold_xor(OptContext *ctx, TCGOp *op)
281
uint64_t imm)
282
{
283
if (!tb->mem_only) {
284
- plugin_register_inline_op_on_entry(
285
- &tb->cbs[PLUGIN_CB_INLINE], 0, op, entry, imm);
286
+ plugin_register_inline_op_on_entry(&tb->cbs, 0, op, entry, imm);
287
}
288
}
289
290
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
291
void *udata)
292
{
293
if (!insn->mem_only) {
294
- plugin_register_dyn_cb__udata(
295
- &insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], cb, flags, udata);
296
+ plugin_register_dyn_cb__udata(&insn->insn_cbs, cb, flags, udata);
297
}
298
}
299
300
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
301
uint64_t imm)
302
{
303
if (!insn->mem_only) {
304
- plugin_register_inline_op_on_entry(
305
- &insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE], 0, op, entry, imm);
306
+ plugin_register_inline_op_on_entry(&insn->insn_cbs, 0, op, entry, imm);
307
}
308
}
309
310
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
311
enum qemu_plugin_mem_rw rw,
312
void *udata)
313
{
314
- plugin_register_vcpu_mem_cb(&insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR],
315
- cb, flags, rw, udata);
316
+ plugin_register_vcpu_mem_cb(&insn->mem_cbs, cb, flags, rw, udata);
317
}
318
319
void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
320
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
321
qemu_plugin_u64 entry,
322
uint64_t imm)
323
{
324
- plugin_register_inline_op_on_entry(
325
- &insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE], rw, op, entry, imm);
326
+ plugin_register_inline_op_on_entry(&insn->mem_cbs, rw, op, entry, imm);
327
}
328
329
void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id,
330
--
59
--
331
2.34.1
60
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
12
fold_xx_to_i(ctx, op, 0)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
20
--
21
2.43.0
New patch
1
Change return from bool to int; distinguish between
2
complete folding, simplification, and no change.
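For illustration only, a sketch of how a caller consumes the tri-state return
(the helper names here are made up; the real callers are updated in the hunks
below):

    int i = fold_xxx_zmask(ctx, op);   /* hypothetical helper using the new convention */
    if (i > 0) {
        return true;                   /* completely folded, nothing more to do */
    }
    if (i == 0) {
        fold_xxx_other(ctx, op);       /* only try further patterns if the op was untouched */
    }
    /* i < 0: op was rewritten in place; skip other patterns, finish as usual */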
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 22 ++++++++++++++--------
8
1 file changed, 14 insertions(+), 8 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
15
return finish_folding(ctx, op);
16
}
17
18
-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
19
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
20
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
21
{
22
uint64_t a_zmask, b_val;
23
TCGCond cond;
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
25
op->opc = xor_opc;
26
op->args[2] = arg_new_constant(ctx, 1);
27
}
28
- return false;
29
+ return -1;
30
}
31
}
32
-
33
- return false;
34
+ return 0;
35
}
36
37
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
39
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
40
}
41
42
- if (fold_setcond_zmask(ctx, op, false)) {
43
+ i = fold_setcond_zmask(ctx, op, false);
44
+ if (i > 0) {
45
return true;
46
}
47
- fold_setcond_tst_pow2(ctx, op, false);
48
+ if (i == 0) {
49
+ fold_setcond_tst_pow2(ctx, op, false);
50
+ }
51
52
ctx->z_mask = 1;
53
return false;
54
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
55
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
56
}
57
58
- if (fold_setcond_zmask(ctx, op, true)) {
59
+ i = fold_setcond_zmask(ctx, op, true);
60
+ if (i > 0) {
61
return true;
62
}
63
- fold_setcond_tst_pow2(ctx, op, true);
64
+ if (i == 0) {
65
+ fold_setcond_tst_pow2(ctx, op, true);
66
+ }
67
68
/* Value is {0,-1} so all bits are repetitions of the sign. */
69
ctx->s_mask = -1;
70
--
71
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
14
fold_setcond_tst_pow2(ctx, op, false);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
}
21
22
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
14
}
15
16
/* Value is {0,-1} so all bits are repetitions of the sign. */
17
- ctx->s_mask = -1;
18
- return false;
19
+ return fold_masks_s(ctx, op, -1);
20
}
21
22
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
14
return fold_setcond(ctx, op);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
21
do_setcond_const:
22
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
23
--
24
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
13
op->args[3] = tcg_swap_cond(op->args[3]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
13
op->args[5] = tcg_invert_cond(op->args[5]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_sextract(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
1
Each caller can use tcg_gen_plugin_cb directly.
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
accel/tcg/plugin-gen.c | 19 +++----------------
6
tcg/optimize.c | 24 +++++++++---------------
7
1 file changed, 3 insertions(+), 16 deletions(-)
7
1 file changed, 9 insertions(+), 15 deletions(-)
8
8
9
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
11
--- a/accel/tcg/plugin-gen.c
11
--- a/tcg/optimize.c
12
+++ b/accel/tcg/plugin-gen.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ enum plugin_gen_from {
13
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
14
PLUGIN_GEN_AFTER_TB,
14
static bool fold_sextract(OptContext *ctx, TCGOp *op)
15
};
15
{
16
16
uint64_t z_mask, s_mask, s_mask_old;
17
-static void plugin_gen_empty_callback(enum plugin_gen_from from)
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
-{
18
int pos = op->args[2];
19
- switch (from) {
19
int len = op->args[3];
20
- case PLUGIN_GEN_AFTER_INSN:
20
21
- case PLUGIN_GEN_FROM_TB:
21
- if (arg_is_const(op->args[1])) {
22
- case PLUGIN_GEN_FROM_INSN:
22
- uint64_t t;
23
- tcg_gen_plugin_cb(from);
24
- break;
25
- default:
26
- g_assert_not_reached();
27
- }
28
-}
29
-
23
-
30
/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
24
- t = arg_info(op->args[1])->val;
31
void plugin_gen_disable_mem_helpers(void)
25
- t = sextract64(t, pos, len);
32
{
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
33
@@ -XXX,XX +XXX,XX @@ bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
27
+ if (ti_is_const(t1)) {
34
ptb->mem_only = mem_only;
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
35
ptb->mem_helper = false;
29
+ sextract64(ti_const_val(t1), pos, len));
36
37
- plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB);
38
+ tcg_gen_plugin_cb(PLUGIN_GEN_FROM_TB);
39
}
30
}
40
31
41
tcg_ctx->plugin_insn = NULL;
32
- z_mask = arg_info(op->args[1])->z_mask;
42
@@ -XXX,XX +XXX,XX @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
33
- z_mask = sextract64(z_mask, pos, len);
43
insn->haddr = ptb->haddr2 + pc - ptb->vaddr2;
34
- ctx->z_mask = z_mask;
35
-
36
- s_mask_old = arg_info(op->args[1])->s_mask;
37
- s_mask = sextract64(s_mask_old, pos, len);
38
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
39
- ctx->s_mask = s_mask;
40
+ s_mask_old = t1->s_mask;
41
+ s_mask = s_mask_old >> pos;
42
+ s_mask |= -1ull << (len - 1);
43
44
if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
45
return true;
44
}
46
}
45
47
46
- plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);
48
- return fold_masks(ctx, op);
47
+ tcg_gen_plugin_cb(PLUGIN_GEN_FROM_INSN);
49
+ z_mask = sextract64(t1->z_mask, pos, len);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
48
}
51
}
49
52
50
void plugin_gen_insn_end(void)
53
static bool fold_shift(OptContext *ctx, TCGOp *op)
51
{
52
- plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN);
53
+ tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_INSN);
54
}
55
56
/*
57
--
54
--
58
2.34.1
55
2.43.0
59
60
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 27 ++++++++++++++-------------
7
1 file changed, 14 insertions(+), 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
14
static bool fold_shift(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t s_mask, z_mask, sign;
17
+ TempOptInfo *t1, *t2;
18
19
if (fold_const2(ctx, op) ||
20
fold_ix_to_i(ctx, op, 0) ||
21
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
22
return true;
23
}
24
25
- s_mask = arg_info(op->args[1])->s_mask;
26
- z_mask = arg_info(op->args[1])->z_mask;
27
+ t1 = arg_info(op->args[1]);
28
+ t2 = arg_info(op->args[2]);
29
+ s_mask = t1->s_mask;
30
+ z_mask = t1->z_mask;
31
32
- if (arg_is_const(op->args[2])) {
33
- int sh = arg_info(op->args[2])->val;
34
-
35
- ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
36
+ if (ti_is_const(t2)) {
37
+ int sh = ti_const_val(t2);
38
39
+ z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
40
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
41
42
- return fold_masks(ctx, op);
43
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
44
}
45
46
switch (op->opc) {
47
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
48
* Arithmetic right shift will not reduce the number of
49
* input sign repetitions.
50
*/
51
- ctx->s_mask = s_mask;
52
- break;
53
+ return fold_masks_s(ctx, op, s_mask);
54
CASE_OP_32_64(shr):
55
/*
56
* If the sign bit is known zero, then logical right shift
57
- * will not reduced the number of input sign repetitions.
58
+ * will not reduce the number of input sign repetitions.
59
*/
60
- sign = (s_mask & -s_mask) >> 1;
61
+ sign = -s_mask;
62
if (sign && !(z_mask & sign)) {
63
- ctx->s_mask = s_mask;
64
+ return fold_masks_s(ctx, op, s_mask);
65
}
66
break;
67
default:
68
break;
69
}
70
71
- return false;
72
+ return finish_folding(ctx, op);
73
}
74
75
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
76
--
77
2.43.0
New patch
1
Merge the two conditions, sign != 0 && !(z_mask & sign),
2
by testing ~z_mask & sign. If sign == 0, the bitwise AND
3
produces zero, so the test is still false.
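A standalone sanity check of that equivalence (not part of the patch; it
assumes, as in this code, that sign = -s_mask is either zero or a single bit):

    /* quick equivalence check, compiles with any C compiler */
    #include <assert.h>
    #include <stdint.h>

    static int old_test(uint64_t z_mask, uint64_t sign)
    {
        return sign != 0 && !(z_mask & sign);    /* condition before this patch */
    }

    static int new_test(uint64_t z_mask, uint64_t sign)
    {
        return (~z_mask & sign) != 0;            /* merged condition */
    }

    int main(void)
    {
        assert(old_test(0x7f, 0x80) == new_test(0x7f, 0x80)); /* sign bit known zero -> true */
        assert(old_test(0xff, 0x80) == new_test(0xff, 0x80)); /* sign bit unknown -> false */
        assert(old_test(0x7f, 0x00) == new_test(0x7f, 0x00)); /* sign == 0 -> false */
        return 0;
    }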
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 5 ++---
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
16
17
static bool fold_shift(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t s_mask, z_mask, sign;
20
+ uint64_t s_mask, z_mask;
21
TempOptInfo *t1, *t2;
22
23
if (fold_const2(ctx, op) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
25
* If the sign bit is known zero, then logical right shift
26
* will not reduce the number of input sign repetitions.
27
*/
28
- sign = -s_mask;
29
- if (sign && !(z_mask & sign)) {
30
+ if (~z_mask & -s_mask) {
31
return fold_masks_s(ctx, op, s_mask);
32
}
33
break;
34
--
35
2.43.0
New patch
1
Duplicate fold_sub_vec into fold_sub instead of calling it,
2
now that fold_sub_vec always returns true.
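Put differently, a sketch of the previous shape of fold_sub, shown only to
make the reasoning explicit:

    /* before this patch fold_sub began with: */
    if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
        return true;
    }
    /*
     * With fold_sub_vec now always returning true, that test would always
     * take the early return, so the "sub x, const -> add x, -const"
     * rewrite further down in fold_sub could never be reached.  Hence the
     * relevant checks are duplicated into fold_sub instead.
     */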
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 9 ++++++---
8
1 file changed, 6 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
15
fold_sub_to_neg(ctx, op)) {
16
return true;
17
}
18
- return false;
19
+ return finish_folding(ctx, op);
20
}
21
22
static bool fold_sub(OptContext *ctx, TCGOp *op)
23
{
24
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
25
+ if (fold_const2(ctx, op) ||
26
+ fold_xx_to_i(ctx, op, 0) ||
27
+ fold_xi_to_x(ctx, op, 0) ||
28
+ fold_sub_to_neg(ctx, op)) {
29
return true;
30
}
31
32
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
33
? INDEX_op_add_i32 : INDEX_op_add_i64);
34
op->args[2] = arg_new_constant(ctx, -val);
35
}
36
- return false;
37
+ return finish_folding(ctx, op);
38
}
39
40
static bool fold_sub2(OptContext *ctx, TCGOp *op)
41
--
42
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 16 +++++++++-------
7
1 file changed, 9 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask = -1, s_mask = 0;
18
+
19
/* We can't do any folding with a load, but we can record bits. */
20
switch (op->opc) {
21
CASE_OP_32_64(ld8s):
22
- ctx->s_mask = MAKE_64BIT_MASK(8, 56);
23
+ s_mask = INT8_MIN;
24
break;
25
CASE_OP_32_64(ld8u):
26
- ctx->z_mask = MAKE_64BIT_MASK(0, 8);
27
+ z_mask = MAKE_64BIT_MASK(0, 8);
28
break;
29
CASE_OP_32_64(ld16s):
30
- ctx->s_mask = MAKE_64BIT_MASK(16, 48);
31
+ s_mask = INT16_MIN;
32
break;
33
CASE_OP_32_64(ld16u):
34
- ctx->z_mask = MAKE_64BIT_MASK(0, 16);
35
+ z_mask = MAKE_64BIT_MASK(0, 16);
36
break;
37
case INDEX_op_ld32s_i64:
38
- ctx->s_mask = MAKE_64BIT_MASK(32, 32);
39
+ s_mask = INT32_MIN;
40
break;
41
case INDEX_op_ld32u_i64:
42
- ctx->z_mask = MAKE_64BIT_MASK(0, 32);
43
+ z_mask = MAKE_64BIT_MASK(0, 32);
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
51
52
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
3
---
4
accel/tcg/plugin-gen.c | 31 ++++---------------------------
4
tcg/optimize.c | 2 +-
5
1 file changed, 4 insertions(+), 27 deletions(-)
5
1 file changed, 1 insertion(+), 1 deletion(-)
6
6
7
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
9
--- a/accel/tcg/plugin-gen.c
9
--- a/tcg/optimize.c
10
+++ b/accel/tcg/plugin-gen.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@
11
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
12
* Injecting the desired instrumentation could be done with a second
12
TCGType type;
13
* translation pass that combined the instrumentation requests, but that
13
14
* would be ugly and inefficient since we would decode the guest code twice.
14
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
15
- * Instead, during TB translation we add "empty" instrumentation calls for all
15
- return false;
16
- * possible instrumentation events, and then once we collect the instrumentation
16
+ return finish_folding(ctx, op);
17
- * requests from plugins, we either "fill in" those empty events or remove them
17
}
18
- * if they have no requests.
18
19
- *
19
type = ctx->type;
20
- * When "filling in" an event we first copy the empty callback's TCG ops. This
21
- * might seem unnecessary, but it is done to support an arbitrary number
22
- * of callbacks per event. Take for example a regular instruction callback.
23
- * We first generate a callback to an empty helper function. Then, if two
24
- * plugins register one callback each for this instruction, we make two copies
25
- * of the TCG ops generated for the empty callback, substituting the function
26
- * pointer that points to the empty helper function with the plugins' desired
27
- * callback functions. After that we remove the empty callback's ops.
28
- *
29
- * Note that the location in TCGOp.args[] of the pointer to a helper function
30
- * varies across different guest and host architectures. Instead of duplicating
31
- * the logic that figures this out, we rely on the fact that the empty
32
- * callbacks point to empty functions that are unique pointers in the program.
33
- * Thus, to find the right location we just have to look for a match in
34
- * TCGOp.args[]. This is the main reason why we first copy an empty callback's
35
- * TCG ops and then fill them in; regardless of whether we have one or many
36
- * callbacks for that event, the logic to add all of them is the same.
37
- *
38
- * When generating more than one callback per event, we make a small
39
- * optimization to avoid generating redundant operations. For instance, for the
40
- * second and all subsequent callbacks of an event, we do not need to reload the
41
- * CPU's index into a TCG temp, since the first callback did it already.
42
+ * Instead, during TB translation we add "plugin_cb" marker opcodes
43
+ * for all possible instrumentation events, and then once we collect the
44
+ * instrumentation requests from plugins, we generate code for those markers
45
+ * or remove them if they have no requests.
46
*/
47
#include "qemu/osdep.h"
48
#include "qemu/plugin.h"
49
--
20
--
50
2.34.1
21
2.43.0
1
Merge qemu_plugin_insn_alloc and qemu_plugin_tb_insn_get into
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
plugin_gen_insn_start, since they are used nowhere else.
2
Remove fold_masks as the function becomes unused.
3
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
include/qemu/plugin.h | 39 ---------------------------------------
7
tcg/optimize.c | 18 ++++++++----------
8
accel/tcg/plugin-gen.c | 39 ++++++++++++++++++++++++++++++++-------
8
1 file changed, 8 insertions(+), 10 deletions(-)
9
2 files changed, 32 insertions(+), 46 deletions(-)
10
9
11
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/include/qemu/plugin.h
12
--- a/tcg/optimize.c
14
+++ b/include/qemu/plugin.h
13
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static inline void qemu_plugin_insn_cleanup_fn(gpointer data)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
16
g_byte_array_free(insn->data, true);
15
return fold_masks_zs(ctx, op, -1, s_mask);
17
}
16
}
18
17
19
-static inline struct qemu_plugin_insn *qemu_plugin_insn_alloc(void)
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
20
-{
19
-{
21
- struct qemu_plugin_insn *insn = g_new0(struct qemu_plugin_insn, 1);
20
- return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
22
-
23
- insn->data = g_byte_array_sized_new(4);
24
- return insn;
25
-}
21
-}
26
-
22
-
27
/* Internal context for this TranslationBlock */
23
/*
28
struct qemu_plugin_tb {
24
* An "affected" mask bit is 0 if and only if the result is identical
29
GPtrArray *insns;
25
* to the first input. Thus if the entire mask is 0, the operation
30
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_tb {
26
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
31
GArray *cbs;
27
32
};
28
static bool fold_xor(OptContext *ctx, TCGOp *op)
33
34
-/**
35
- * qemu_plugin_tb_insn_get(): get next plugin record for translation.
36
- * @tb: the internal tb context
37
- * @pc: address of instruction
38
- */
39
-static inline
40
-struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb,
41
- uint64_t pc)
42
-{
43
- struct qemu_plugin_insn *insn;
44
-
45
- if (unlikely(tb->n == tb->insns->len)) {
46
- struct qemu_plugin_insn *new_insn = qemu_plugin_insn_alloc();
47
- g_ptr_array_add(tb->insns, new_insn);
48
- }
49
-
50
- insn = g_ptr_array_index(tb->insns, tb->n++);
51
- g_byte_array_set_size(insn->data, 0);
52
- insn->calls_helpers = false;
53
- insn->mem_helper = false;
54
- insn->vaddr = pc;
55
- if (insn->insn_cbs) {
56
- g_array_set_size(insn->insn_cbs, 0);
57
- }
58
- if (insn->mem_cbs) {
59
- g_array_set_size(insn->mem_cbs, 0);
60
- }
61
-
62
- return insn;
63
-}
64
-
65
/**
66
* struct CPUPluginState - per-CPU state for plugins
67
* @event_mask: plugin event bitmap. Modified only via async work.
68
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/accel/tcg/plugin-gen.c
71
+++ b/accel/tcg/plugin-gen.c
72
@@ -XXX,XX +XXX,XX @@ bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
73
void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
74
{
29
{
75
struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
30
+ uint64_t z_mask, s_mask;
76
- struct qemu_plugin_insn *pinsn;
31
+ TempOptInfo *t1, *t2;
77
+ struct qemu_plugin_insn *insn;
78
+ size_t n = db->num_insns;
79
+ vaddr pc;
80
81
- pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next);
82
- tcg_ctx->plugin_insn = pinsn;
83
- plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);
84
+ assert(n >= 1);
85
+ ptb->n = n;
86
+ if (n <= ptb->insns->len) {
87
+ insn = g_ptr_array_index(ptb->insns, n - 1);
88
+ g_byte_array_set_size(insn->data, 0);
89
+ } else {
90
+ assert(n - 1 == ptb->insns->len);
91
+ insn = g_new0(struct qemu_plugin_insn, 1);
92
+ insn->data = g_byte_array_sized_new(4);
93
+ g_ptr_array_add(ptb->insns, insn);
94
+ }
95
+
32
+
96
+ tcg_ctx->plugin_insn = insn;
33
if (fold_const2_commutative(ctx, op) ||
97
+ insn->calls_helpers = false;
34
fold_xx_to_i(ctx, op, 0) ||
98
+ insn->mem_helper = false;
35
fold_xi_to_x(ctx, op, 0) ||
99
+ if (insn->insn_cbs) {
36
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
100
+ g_array_set_size(insn->insn_cbs, 0);
37
return true;
101
+ }
102
+ if (insn->mem_cbs) {
103
+ g_array_set_size(insn->mem_cbs, 0);
104
+ }
105
+
106
+ pc = db->pc_next;
107
+ insn->vaddr = pc;
108
109
/*
110
* Detect page crossing to get the new host address.
111
@@ -XXX,XX +XXX,XX @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
112
* fetching instructions from a region not backed by RAM.
113
*/
114
if (ptb->haddr1 == NULL) {
115
- pinsn->haddr = NULL;
116
+ insn->haddr = NULL;
117
} else if (is_same_page(db, db->pc_next)) {
118
- pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
119
+ insn->haddr = ptb->haddr1 + pc - ptb->vaddr;
120
} else {
121
if (ptb->vaddr2 == -1) {
122
ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
123
get_page_addr_code_hostp(cpu_env(cpu), ptb->vaddr2, &ptb->haddr2);
124
}
125
- pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
126
+ insn->haddr = ptb->haddr2 + pc - ptb->vaddr2;
127
}
38
}
128
+
39
129
+ plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN);
40
- ctx->z_mask = arg_info(op->args[1])->z_mask
41
- | arg_info(op->args[2])->z_mask;
42
- ctx->s_mask = arg_info(op->args[1])->s_mask
43
- & arg_info(op->args[2])->s_mask;
44
- return fold_masks(ctx, op);
45
+ t1 = arg_info(op->args[1]);
46
+ t2 = arg_info(op->args[2]);
47
+ z_mask = t1->z_mask | t2->z_mask;
48
+ s_mask = t1->s_mask & t2->s_mask;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
130
}
50
}
131
51
132
void plugin_gen_insn_end(void)
52
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
133
--
53
--
134
2.34.1
54
2.43.0
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
3
---
4
accel/tcg/plugin-gen.c | 84 +++++++++++++++++++++---------------------
4
tcg/optimize.c | 2 +-
5
1 file changed, 41 insertions(+), 43 deletions(-)
5
1 file changed, 1 insertion(+), 1 deletion(-)
6
6
7
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
9
--- a/accel/tcg/plugin-gen.c
9
--- a/tcg/optimize.c
10
+++ b/accel/tcg/plugin-gen.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static void gen_mem_cb(struct qemu_plugin_dyn_cb *cb,
11
@@ -XXX,XX +XXX,XX @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
12
tcg_temp_free_i32(cpu_index);
12
return fold_orc(ctx, op);
13
}
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
13
}
17
}
14
18
15
+static void inject_cb(struct qemu_plugin_dyn_cb *cb)
19
/* Propagate constants and copies, fold constant expressions. */
16
+
17
+{
18
+ switch (cb->type) {
19
+ case PLUGIN_CB_REGULAR:
20
+ gen_udata_cb(cb);
21
+ break;
22
+ case PLUGIN_CB_INLINE:
23
+ gen_inline_cb(cb);
24
+ break;
25
+ default:
26
+ g_assert_not_reached();
27
+ }
28
+}
29
+
30
+static void inject_mem_cb(struct qemu_plugin_dyn_cb *cb,
31
+ enum qemu_plugin_mem_rw rw,
32
+ qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
33
+{
34
+ if (cb->rw & rw) {
35
+ switch (cb->type) {
36
+ case PLUGIN_CB_MEM_REGULAR:
37
+ gen_mem_cb(cb, meminfo, addr);
38
+ break;
39
+ default:
40
+ inject_cb(cb);
41
+ break;
42
+ }
43
+ }
44
+}
45
+
46
static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
47
{
48
TCGOp *op, *next;
49
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
50
51
cbs = plugin_tb->cbs;
52
for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
53
- struct qemu_plugin_dyn_cb *cb =
54
- &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
55
-
56
- switch (cb->type) {
57
- case PLUGIN_CB_REGULAR:
58
- gen_udata_cb(cb);
59
- break;
60
- case PLUGIN_CB_INLINE:
61
- gen_inline_cb(cb);
62
- break;
63
- default:
64
- g_assert_not_reached();
65
- }
66
+ inject_cb(
67
+ &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
68
}
69
break;
70
71
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
72
73
cbs = insn->insn_cbs;
74
for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
75
- struct qemu_plugin_dyn_cb *cb =
76
- &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
77
-
78
- switch (cb->type) {
79
- case PLUGIN_CB_REGULAR:
80
- gen_udata_cb(cb);
81
- break;
82
- case PLUGIN_CB_INLINE:
83
- gen_inline_cb(cb);
84
- break;
85
- default:
86
- g_assert_not_reached();
87
- }
88
+ inject_cb(
89
+ &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
90
}
91
break;
92
93
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
94
{
95
TCGv_i64 addr = temp_tcgv_i64(arg_temp(op->args[0]));
96
qemu_plugin_meminfo_t meminfo = op->args[1];
97
+ enum qemu_plugin_mem_rw rw =
98
+ (qemu_plugin_mem_is_store(meminfo)
99
+ ? QEMU_PLUGIN_MEM_W : QEMU_PLUGIN_MEM_R);
100
struct qemu_plugin_insn *insn;
101
const GArray *cbs;
102
- int i, n, rw;
103
+ int i, n;
104
105
assert(insn_idx >= 0);
106
insn = g_ptr_array_index(plugin_tb->insns, insn_idx);
107
- rw = qemu_plugin_mem_is_store(meminfo) ? 2 : 1;
108
109
tcg_ctx->emit_before_op = op;
110
111
cbs = insn->mem_cbs;
112
for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
113
- struct qemu_plugin_dyn_cb *cb =
114
- &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
115
-
116
- if (cb->rw & rw) {
117
- switch (cb->type) {
118
- case PLUGIN_CB_MEM_REGULAR:
119
- gen_mem_cb(cb, meminfo, addr);
120
- break;
121
- case PLUGIN_CB_INLINE:
122
- gen_inline_cb(cb);
123
- break;
124
- default:
125
- g_assert_not_reached();
126
- }
127
- }
128
+ inject_mem_cb(&g_array_index(cbs, struct qemu_plugin_dyn_cb, i),
129
+ rw, meminfo, addr);
130
}
131
132
tcg_ctx->emit_before_op = NULL;
133
--
20
--
134
2.34.1
21
2.43.0
New patch
1
All non-default cases now finish folding within each function.
2
Do the same with the default case and assert it is done after.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 6 ++----
8
1 file changed, 2 insertions(+), 4 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
15
done = true;
16
break;
17
default:
18
+ done = finish_folding(&ctx, op);
19
break;
20
}
21
-
22
- if (!done) {
23
- finish_folding(&ctx, op);
24
- }
25
+ tcg_debug_assert(done);
26
}
27
}
28
--
29
2.43.0
1
Use different enumerators for vcpu_udata and vcpu_mem callbacks.
1
All mask setting is now done with parameters via fold_masks_*.
2
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
include/qemu/plugin.h | 1 +
6
tcg/optimize.c | 13 -------------
7
accel/tcg/plugin-gen.c | 2 +-
7
1 file changed, 13 deletions(-)
8
plugins/core.c | 4 ++--
9
3 files changed, 4 insertions(+), 3 deletions(-)
10
8
11
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
13
--- a/include/qemu/plugin.h
11
--- a/tcg/optimize.c
14
+++ b/include/qemu/plugin.h
12
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ union qemu_plugin_cb_sig {
13
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
16
14
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
17
enum plugin_dyn_cb_type {
15
18
PLUGIN_CB_REGULAR,
16
/* In flight values from optimization. */
19
+ PLUGIN_CB_MEM_REGULAR,
17
- uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
20
PLUGIN_CB_INLINE,
18
- uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
21
};
19
TCGType type;
22
20
} OptContext;
23
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
21
24
index XXXXXXX..XXXXXXX 100644
22
@@ -XXX,XX +XXX,XX @@ static bool finish_folding(OptContext *ctx, TCGOp *op)
25
--- a/accel/tcg/plugin-gen.c
23
for (i = 0; i < nb_oargs; i++) {
26
+++ b/accel/tcg/plugin-gen.c
24
TCGTemp *ts = arg_temp(op->args[i]);
27
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
25
reset_ts(ctx, ts);
28
26
- /*
29
if (cb->rw & rw) {
27
- * Save the corresponding known-zero/sign bits mask for the
30
switch (cb->type) {
28
- * first output argument (only one supported so far).
31
- case PLUGIN_CB_REGULAR:
29
- */
32
+ case PLUGIN_CB_MEM_REGULAR:
30
- if (i == 0) {
33
gen_mem_cb(cb, meminfo, addr);
31
- ts_info(ts)->z_mask = ctx->z_mask;
34
break;
32
- }
35
case PLUGIN_CB_INLINE:
33
}
36
diff --git a/plugins/core.c b/plugins/core.c
34
return true;
37
index XXXXXXX..XXXXXXX 100644
35
}
38
--- a/plugins/core.c
36
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
39
+++ b/plugins/core.c
37
ctx.type = TCG_TYPE_I32;
40
@@ -XXX,XX +XXX,XX @@ void plugin_register_vcpu_mem_cb(GArray **arr,
41
42
struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
43
dyn_cb->userp = udata;
44
- dyn_cb->type = PLUGIN_CB_REGULAR;
45
+ dyn_cb->type = PLUGIN_CB_MEM_REGULAR;
46
dyn_cb->rw = rw;
47
dyn_cb->regular.f.vcpu_mem = cb;
48
49
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
50
break;
51
}
38
}
52
switch (cb->type) {
39
53
- case PLUGIN_CB_REGULAR:
40
- /* Assume all bits affected, no bits known zero, no sign reps. */
54
+ case PLUGIN_CB_MEM_REGULAR:
41
- ctx.z_mask = -1;
55
cb->regular.f.vcpu_mem(cpu->cpu_index, make_plugin_meminfo(oi, rw),
42
- ctx.s_mask = 0;
56
vaddr, cb->userp);
43
-
57
break;
44
/*
45
* Process each opcode.
46
* Sorted alphabetically by opcode as much as possible.
58
--
47
--
59
2.34.1
48
2.43.0
1
Since we no longer emit plugin helpers during the initial code
1
All instances of s_mask have been converted to the new
2
translation phase, we don't need to specially mark plugin helpers.
2
representation. We can now re-enable usage.
3
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
include/tcg/tcg.h | 2 --
7
tcg/optimize.c | 4 ++--
8
plugins/core.c | 10 ++++------
8
1 file changed, 2 insertions(+), 2 deletions(-)
9
tcg/tcg.c | 4 +---
10
3 files changed, 5 insertions(+), 11 deletions(-)
11
9
12
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
14
--- a/include/tcg/tcg.h
12
--- a/tcg/optimize.c
15
+++ b/include/tcg/tcg.h
13
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ typedef TCGv_ptr TCGv_env;
14
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
17
#define TCG_CALL_NO_SIDE_EFFECTS 0x0004
15
g_assert_not_reached();
18
/* Helper is G_NORETURN. */
19
#define TCG_CALL_NO_RETURN 0x0008
20
-/* Helper is part of Plugins. */
21
-#define TCG_CALL_PLUGIN 0x0010
22
23
/* convenience version of most used call flags */
24
#define TCG_CALL_NO_RWG TCG_CALL_NO_READ_GLOBALS
25
diff --git a/plugins/core.c b/plugins/core.c
26
index XXXXXXX..XXXXXXX 100644
27
--- a/plugins/core.c
28
+++ b/plugins/core.c
29
@@ -XXX,XX +XXX,XX @@ void plugin_register_dyn_cb__udata(GArray **arr,
30
void *udata)
31
{
32
static TCGHelperInfo info[3] = {
33
- [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG | TCG_CALL_PLUGIN,
34
- [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG | TCG_CALL_PLUGIN,
35
- [QEMU_PLUGIN_CB_RW_REGS].flags = TCG_CALL_PLUGIN,
36
+ [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
37
+ [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
38
/*
39
* Match qemu_plugin_vcpu_udata_cb_t:
40
* void (*)(uint32_t, void *)
41
@@ -XXX,XX +XXX,XX @@ void plugin_register_vcpu_mem_cb(GArray **arr,
42
!__builtin_types_compatible_p(qemu_plugin_meminfo_t, int32_t));
43
44
static TCGHelperInfo info[3] = {
45
- [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG | TCG_CALL_PLUGIN,
46
- [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG | TCG_CALL_PLUGIN,
47
- [QEMU_PLUGIN_CB_RW_REGS].flags = TCG_CALL_PLUGIN,
48
+ [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
49
+ [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
50
/*
51
* Match qemu_plugin_vcpu_mem_cb_t:
52
* void (*)(uint32_t, qemu_plugin_meminfo_t, uint64_t, void *)
53
diff --git a/tcg/tcg.c b/tcg/tcg.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/tcg/tcg.c
56
+++ b/tcg/tcg.c
57
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_callN(void *func, TCGHelperInfo *info,
58
59
#ifdef CONFIG_PLUGIN
60
/* Flag helpers that may affect guest state */
61
- if (tcg_ctx->plugin_insn &&
62
- !(info->flags & TCG_CALL_PLUGIN) &&
63
- !(info->flags & TCG_CALL_NO_SIDE_EFFECTS)) {
64
+ if (tcg_ctx->plugin_insn && !(info->flags & TCG_CALL_NO_SIDE_EFFECTS)) {
65
tcg_ctx->plugin_insn->calls_helpers = true;
66
}
16
}
67
#endif
17
18
- if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
19
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
20
return true;
21
}
22
23
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
24
s_mask = s_mask_old >> pos;
25
s_mask |= -1ull << (len - 1);
26
27
- if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
28
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
29
return true;
30
}
31
68
--
32
--
69
2.34.1
33
2.43.0
1
The DEBUG_PLUGIN_GEN_OPS ifdef is replaced with "-d op_plugin".
1
The big comment just above says functions should be sorted.
2
The second pr_ops call can be obtained with "-d op".
2
Add forward declarations as needed.
3
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
6
---
7
include/qemu/log.h | 1 +
7
tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
8
include/tcg/tcg.h | 1 +
8
1 file changed, 59 insertions(+), 55 deletions(-)
9
accel/tcg/plugin-gen.c | 67 +++++++-----------------------------------
10
tcg/tcg.c | 29 +++++++++++++++++-
11
util/log.c | 4 +++
12
5 files changed, 45 insertions(+), 57 deletions(-)
13
9
14
diff --git a/include/qemu/log.h b/include/qemu/log.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
15
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
16
--- a/include/qemu/log.h
12
--- a/tcg/optimize.c
17
+++ b/include/qemu/log.h
13
+++ b/tcg/optimize.c
18
@@ -XXX,XX +XXX,XX @@ bool qemu_log_separate(void);
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
19
#define LOG_STRACE (1 << 19)
15
* 3) those that produce information about the result value.
20
#define LOG_PER_THREAD (1 << 20)
16
*/
21
#define CPU_LOG_TB_VPU (1 << 21)
17
22
+#define LOG_TB_OP_PLUGIN (1 << 22)
18
+static bool fold_or(OptContext *ctx, TCGOp *op);
23
19
+static bool fold_orc(OptContext *ctx, TCGOp *op);
24
/* Lock/unlock output. */
20
+static bool fold_xor(OptContext *ctx, TCGOp *op);
25
21
+
26
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
22
static bool fold_add(OptContext *ctx, TCGOp *op)
27
index XXXXXXX..XXXXXXX 100644
23
{
28
--- a/include/tcg/tcg.h
24
if (fold_const2_commutative(ctx, op) ||
29
+++ b/include/tcg/tcg.h
25
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
30
@@ -XXX,XX +XXX,XX @@ static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n)
26
return fold_masks_zs(ctx, op, z_mask, s_mask);
31
}
27
}
32
28
33
bool tcg_can_emit_vecop_list(const TCGOpcode *, TCGType, unsigned);
29
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
34
+void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs);
30
+{
35
31
+ /* If true and false values are the same, eliminate the cmp. */
36
#endif /* TCG_H */
32
+ if (args_are_copies(op->args[2], op->args[3])) {
37
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
33
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
38
index XXXXXXX..XXXXXXX 100644
34
+ }
39
--- a/accel/tcg/plugin-gen.c
35
+
40
+++ b/accel/tcg/plugin-gen.c
36
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
41
@@ -XXX,XX +XXX,XX @@
37
+ uint64_t tv = arg_info(op->args[2])->val;
42
*/
38
+ uint64_t fv = arg_info(op->args[3])->val;
43
#include "qemu/osdep.h"
39
+
44
#include "qemu/plugin.h"
40
+ if (tv == -1 && fv == 0) {
45
+#include "qemu/log.h"
41
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
46
#include "cpu.h"
42
+ }
47
#include "tcg/tcg.h"
43
+ if (tv == 0 && fv == -1) {
48
#include "tcg/tcg-temp-internal.h"
44
+ if (TCG_TARGET_HAS_not_vec) {
49
@@ -XXX,XX +XXX,XX @@ static void gen_mem_cb(struct qemu_plugin_dyn_cb *cb,
45
+ op->opc = INDEX_op_not_vec;
50
tcg_temp_free_i32(cpu_index);
46
+ return fold_not(ctx, op);
47
+ } else {
48
+ op->opc = INDEX_op_xor_vec;
49
+ op->args[2] = arg_new_constant(ctx, -1);
50
+ return fold_xor(ctx, op);
51
+ }
52
+ }
53
+ }
54
+ if (arg_is_const(op->args[2])) {
55
+ uint64_t tv = arg_info(op->args[2])->val;
56
+ if (tv == -1) {
57
+ op->opc = INDEX_op_or_vec;
58
+ op->args[2] = op->args[3];
59
+ return fold_or(ctx, op);
60
+ }
61
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
62
+ op->opc = INDEX_op_andc_vec;
63
+ op->args[2] = op->args[1];
64
+ op->args[1] = op->args[3];
65
+ return fold_andc(ctx, op);
66
+ }
67
+ }
68
+ if (arg_is_const(op->args[3])) {
69
+ uint64_t fv = arg_info(op->args[3])->val;
70
+ if (fv == 0) {
71
+ op->opc = INDEX_op_and_vec;
72
+ return fold_and(ctx, op);
73
+ }
74
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
75
+ op->opc = INDEX_op_orc_vec;
76
+ op->args[2] = op->args[1];
77
+ op->args[1] = op->args[3];
78
+ return fold_orc(ctx, op);
79
+ }
80
+ }
81
+ return finish_folding(ctx, op);
82
+}
83
+
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
85
{
86
int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
87
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
88
return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
89
}
52
90
53
-/* #define DEBUG_PLUGIN_GEN_OPS */
91
-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
54
-static void pr_ops(void)
55
-{
92
-{
56
-#ifdef DEBUG_PLUGIN_GEN_OPS
93
- /* If true and false values are the same, eliminate the cmp. */
57
- TCGOp *op;
94
- if (args_are_copies(op->args[2], op->args[3])) {
58
- int i = 0;
95
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
96
- }
59
-
97
-
60
- QTAILQ_FOREACH(op, &tcg_ctx->ops, link) {
98
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
61
- const char *name = "";
99
- uint64_t tv = arg_info(op->args[2])->val;
62
- const char *type = "";
100
- uint64_t fv = arg_info(op->args[3])->val;
63
-
101
-
64
- if (op->opc == INDEX_op_plugin_cb_start) {
102
- if (tv == -1 && fv == 0) {
65
- switch (op->args[0]) {
103
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
66
- case PLUGIN_GEN_FROM_TB:
104
- }
67
- name = "tb";
105
- if (tv == 0 && fv == -1) {
68
- break;
106
- if (TCG_TARGET_HAS_not_vec) {
69
- case PLUGIN_GEN_FROM_INSN:
107
- op->opc = INDEX_op_not_vec;
70
- name = "insn";
108
- return fold_not(ctx, op);
71
- break;
109
- } else {
72
- case PLUGIN_GEN_FROM_MEM:
110
- op->opc = INDEX_op_xor_vec;
73
- name = "mem";
111
- op->args[2] = arg_new_constant(ctx, -1);
74
- break;
112
- return fold_xor(ctx, op);
75
- case PLUGIN_GEN_AFTER_INSN:
76
- name = "after insn";
77
- break;
78
- default:
79
- break;
80
- }
81
- switch (op->args[1]) {
82
- case PLUGIN_GEN_CB_UDATA:
83
- type = "udata";
84
- break;
85
- case PLUGIN_GEN_CB_INLINE:
86
- type = "inline";
87
- break;
88
- case PLUGIN_GEN_CB_MEM:
89
- type = "mem";
90
- break;
91
- case PLUGIN_GEN_ENABLE_MEM_HELPER:
92
- type = "enable mem helper";
93
- break;
94
- case PLUGIN_GEN_DISABLE_MEM_HELPER:
95
- type = "disable mem helper";
96
- break;
97
- default:
98
- break;
99
- }
113
- }
100
- }
114
- }
101
- printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type);
102
- i++;
103
- }
115
- }
104
-#endif
116
- if (arg_is_const(op->args[2])) {
117
- uint64_t tv = arg_info(op->args[2])->val;
118
- if (tv == -1) {
119
- op->opc = INDEX_op_or_vec;
120
- op->args[2] = op->args[3];
121
- return fold_or(ctx, op);
122
- }
123
- if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
124
- op->opc = INDEX_op_andc_vec;
125
- op->args[2] = op->args[1];
126
- op->args[1] = op->args[3];
127
- return fold_andc(ctx, op);
128
- }
129
- }
130
- if (arg_is_const(op->args[3])) {
131
- uint64_t fv = arg_info(op->args[3])->val;
132
- if (fv == 0) {
133
- op->opc = INDEX_op_and_vec;
134
- return fold_and(ctx, op);
135
- }
136
- if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
137
- op->opc = INDEX_op_orc_vec;
138
- op->args[2] = op->args[1];
139
- op->args[1] = op->args[3];
140
- return fold_orc(ctx, op);
141
- }
142
- }
143
- return finish_folding(ctx, op);
105
-}
144
-}
106
-
145
-
107
static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
146
/* Propagate constants and copies, fold constant expressions. */
147
void tcg_optimize(TCGContext *s)
108
{
148
{
109
TCGOp *op, *next;
110
int insn_idx = -1;
111
112
- pr_ops();
113
+ if (unlikely(qemu_loglevel_mask(LOG_TB_OP_PLUGIN)
114
+ && qemu_log_in_addr_range(plugin_tb->vaddr))) {
115
+ FILE *logfile = qemu_log_trylock();
116
+ if (logfile) {
117
+ fprintf(logfile, "OP before plugin injection:\n");
118
+ tcg_dump_ops(tcg_ctx, logfile, false);
119
+ fprintf(logfile, "\n");
120
+ qemu_log_unlock(logfile);
121
+ }
122
+ }
123
124
/*
125
* While injecting code, we cannot afford to reuse any ebb temps
126
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
127
break;
128
}
129
}
130
- pr_ops();
131
}
132
133
bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
134
diff --git a/tcg/tcg.c b/tcg/tcg.c
135
index XXXXXXX..XXXXXXX 100644
136
--- a/tcg/tcg.c
137
+++ b/tcg/tcg.c
138
@@ -XXX,XX +XXX,XX @@ static const char bswap_flag_name[][6] = {
139
[TCG_BSWAP_IZ | TCG_BSWAP_OS] = "iz,os",
140
};
141
142
+#ifdef CONFIG_PLUGIN
143
+static const char * const plugin_from_name[] = {
144
+ "from-tb",
145
+ "from-insn",
146
+ "after-insn",
147
+ "after-tb",
148
+};
149
+#endif
150
+
151
static inline bool tcg_regset_single(TCGRegSet d)
152
{
153
return (d & (d - 1)) == 0;
154
@@ -XXX,XX +XXX,XX @@ static inline TCGReg tcg_regset_first(TCGRegSet d)
155
#define ne_fprintf(...) \
156
({ int ret_ = fprintf(__VA_ARGS__); ret_ >= 0 ? ret_ : 0; })
157
158
-static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
159
+void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
160
{
161
char buf[128];
162
TCGOp *op;
163
@@ -XXX,XX +XXX,XX @@ static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
164
i = k = 1;
165
}
166
break;
167
+#ifdef CONFIG_PLUGIN
168
+ case INDEX_op_plugin_cb:
169
+ {
170
+ TCGArg from = op->args[k++];
171
+ const char *name = NULL;
172
+
173
+ if (from < ARRAY_SIZE(plugin_from_name)) {
174
+ name = plugin_from_name[from];
175
+ }
176
+ if (name) {
177
+ col += ne_fprintf(f, "%s", name);
178
+ } else {
179
+ col += ne_fprintf(f, "$0x%" TCG_PRIlx, from);
180
+ }
181
+ i = 1;
182
+ }
183
+ break;
184
+#endif
185
default:
186
i = 0;
187
break;
188
diff --git a/util/log.c b/util/log.c
189
index XXXXXXX..XXXXXXX 100644
190
--- a/util/log.c
191
+++ b/util/log.c
192
@@ -XXX,XX +XXX,XX @@ const QEMULogItem qemu_log_items[] = {
193
"show micro ops after optimization" },
194
{ CPU_LOG_TB_OP_IND, "op_ind",
195
"show micro ops before indirect lowering" },
196
+#ifdef CONFIG_PLUGIN
197
+ { LOG_TB_OP_PLUGIN, "op_plugin",
198
+ "show micro ops before plugin injection" },
199
+#endif
200
{ CPU_LOG_INT, "int",
201
"show interrupts/exceptions in short format" },
202
{ CPU_LOG_EXEC, "exec",
203
--
149
--
204
2.34.1
150
2.43.0
1
These opcodes are no longer used.
1
The big comment just above says functions should be sorted.
2
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
include/tcg/tcg-op-common.h | 2 --
6
tcg/optimize.c | 60 +++++++++++++++++++++++++-------------------------
7
include/tcg/tcg-opc.h | 2 --
7
1 file changed, 30 insertions(+), 30 deletions(-)
8
accel/tcg/plugin-gen.c | 18 ------------------
9
tcg/tcg-op.c | 10 ----------
10
4 files changed, 32 deletions(-)
11
8
12
diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
14
--- a/include/tcg/tcg-op-common.h
11
--- a/tcg/optimize.c
15
+++ b/include/tcg/tcg-op-common.h
12
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ void tcg_gen_lookup_and_goto_ptr(void);
13
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
17
14
return true;
18
void tcg_gen_plugin_cb(unsigned from);
19
void tcg_gen_plugin_mem_cb(TCGv_i64 addr, unsigned meminfo);
20
-void tcg_gen_plugin_cb_start(unsigned from, unsigned type, unsigned wr);
21
-void tcg_gen_plugin_cb_end(void);
22
23
/* 32 bit ops */
24
25
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
26
index XXXXXXX..XXXXXXX 100644
27
--- a/include/tcg/tcg-opc.h
28
+++ b/include/tcg/tcg-opc.h
29
@@ -XXX,XX +XXX,XX @@ DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
30
31
DEF(plugin_cb, 0, 0, 1, TCG_OPF_NOT_PRESENT)
32
DEF(plugin_mem_cb, 0, 1, 1, TCG_OPF_NOT_PRESENT)
33
-DEF(plugin_cb_start, 0, 0, 3, TCG_OPF_NOT_PRESENT)
34
-DEF(plugin_cb_end, 0, 0, 0, TCG_OPF_NOT_PRESENT)
35
36
/* Replicate ld/st ops for 32 and 64-bit guest addresses. */
37
DEF(qemu_ld_a32_i32, 1, 1, 1,
38
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/accel/tcg/plugin-gen.c
41
+++ b/accel/tcg/plugin-gen.c
42
@@ -XXX,XX +XXX,XX @@
43
#include "exec/plugin-gen.h"
44
#include "exec/translator.h"
45
46
-/*
47
- * plugin_cb_start TCG op args[]:
48
- * 0: enum plugin_gen_from
49
- * 1: enum plugin_gen_cb
50
- * 2: set to 1 for mem callback that is a write, 0 otherwise.
51
- */
52
-
53
enum plugin_gen_from {
54
PLUGIN_GEN_FROM_TB,
55
PLUGIN_GEN_FROM_INSN,
56
PLUGIN_GEN_AFTER_INSN,
57
PLUGIN_GEN_AFTER_TB,
58
- PLUGIN_GEN_N_FROMS,
59
-};
60
-
61
-enum plugin_gen_cb {
62
- PLUGIN_GEN_CB_UDATA,
63
- PLUGIN_GEN_CB_UDATA_R,
64
- PLUGIN_GEN_CB_INLINE,
65
- PLUGIN_GEN_CB_MEM,
66
- PLUGIN_GEN_ENABLE_MEM_HELPER,
67
- PLUGIN_GEN_DISABLE_MEM_HELPER,
68
- PLUGIN_GEN_N_CBS,
69
};
70
71
static void plugin_gen_empty_callback(enum plugin_gen_from from)
72
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
73
index XXXXXXX..XXXXXXX 100644
74
--- a/tcg/tcg-op.c
75
+++ b/tcg/tcg-op.c
76
@@ -XXX,XX +XXX,XX @@ void tcg_gen_plugin_mem_cb(TCGv_i64 addr, unsigned meminfo)
77
tcg_gen_op2(INDEX_op_plugin_mem_cb, tcgv_i64_arg(addr), meminfo);
78
}
15
}
79
16
80
-void tcg_gen_plugin_cb_start(unsigned from, unsigned type, unsigned wr)
17
+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
18
+{
19
+ /* Canonicalize the comparison to put immediate second. */
20
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
21
+ op->args[3] = tcg_swap_cond(op->args[3]);
22
+ }
23
+ return finish_folding(ctx, op);
24
+}
25
+
26
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
27
+{
28
+ /* If true and false values are the same, eliminate the cmp. */
29
+ if (args_are_copies(op->args[3], op->args[4])) {
30
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
31
+ }
32
+
33
+ /* Canonicalize the comparison to put immediate second. */
34
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
35
+ op->args[5] = tcg_swap_cond(op->args[5]);
36
+ }
37
+ /*
38
+ * Canonicalize the "false" input reg to match the destination,
39
+ * so that the tcg backend can implement "move if true".
40
+ */
41
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
42
+ op->args[5] = tcg_invert_cond(op->args[5]);
43
+ }
44
+ return finish_folding(ctx, op);
45
+}
46
+
47
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
48
{
49
uint64_t z_mask, s_mask;
50
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
51
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
52
}
53
54
-static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
81
-{
55
-{
82
- tcg_gen_op3(INDEX_op_plugin_cb_start, from, type, wr);
56
- /* Canonicalize the comparison to put immediate second. */
57
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
58
- op->args[3] = tcg_swap_cond(op->args[3]);
59
- }
60
- return finish_folding(ctx, op);
83
-}
61
-}
84
-
62
-
85
-void tcg_gen_plugin_cb_end(void)
63
-static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
86
-{
64
-{
87
- tcg_emit_op(INDEX_op_plugin_cb_end, 0);
65
- /* If true and false values are the same, eliminate the cmp. */
66
- if (args_are_copies(op->args[3], op->args[4])) {
67
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
68
- }
69
-
70
- /* Canonicalize the comparison to put immediate second. */
71
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
72
- op->args[5] = tcg_swap_cond(op->args[5]);
73
- }
74
- /*
75
- * Canonicalize the "false" input reg to match the destination,
76
- * so that the tcg backend can implement "move if true".
77
- */
78
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
79
- op->args[5] = tcg_invert_cond(op->args[5]);
80
- }
81
- return finish_folding(ctx, op);
88
-}
82
-}
89
-
83
-
90
/* 32 bit ops */
84
static bool fold_sextract(OptContext *ctx, TCGOp *op)
91
85
{
92
void tcg_gen_discard_i32(TCGv_i32 arg)
86
uint64_t z_mask, s_mask, s_mask_old;
93
--
87
--
94
2.34.1
88
2.43.0
New patch
1
1
We currently have a flag, float_muladd_halve_result, to scale
2
the result by 2**-1. Extend this to handle arbitrary scaling.
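As a sketch of the intended semantics (not part of the patch; a, b, c and st
are placeholder operands and a placeholder float_status): the new
*_muladd_scalbn variants compute (a * b + c) * 2**scale with a single
rounding, so the existing halve-result behaviour corresponds to scale = -1.

    /* Illustration only: both calls should produce the same result. */
    float32 r_old = float32_muladd(a, b, c, float_muladd_halve_result, &st);
    float32 r_new = float32_muladd_scalbn(a, b, c, -1, 0, &st);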
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/fpu/softfloat.h | 6 ++++
8
fpu/softfloat.c | 58 ++++++++++++++++++++++-----------------
9
fpu/softfloat-parts.c.inc | 7 +++--
10
3 files changed, 44 insertions(+), 27 deletions(-)
11
12
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/include/fpu/softfloat.h
15
+++ b/include/fpu/softfloat.h
16
@@ -XXX,XX +XXX,XX @@ float16 float16_add(float16, float16, float_status *status);
17
float16 float16_sub(float16, float16, float_status *status);
18
float16 float16_mul(float16, float16, float_status *status);
19
float16 float16_muladd(float16, float16, float16, int, float_status *status);
20
+float16 float16_muladd_scalbn(float16, float16, float16,
21
+ int, int, float_status *status);
22
float16 float16_div(float16, float16, float_status *status);
23
float16 float16_scalbn(float16, int, float_status *status);
24
float16 float16_min(float16, float16, float_status *status);
25
@@ -XXX,XX +XXX,XX @@ float32 float32_mul(float32, float32, float_status *status);
26
float32 float32_div(float32, float32, float_status *status);
27
float32 float32_rem(float32, float32, float_status *status);
28
float32 float32_muladd(float32, float32, float32, int, float_status *status);
29
+float32 float32_muladd_scalbn(float32, float32, float32,
30
+ int, int, float_status *status);
31
float32 float32_sqrt(float32, float_status *status);
32
float32 float32_exp2(float32, float_status *status);
33
float32 float32_log2(float32, float_status *status);
34
@@ -XXX,XX +XXX,XX @@ float64 float64_mul(float64, float64, float_status *status);
35
float64 float64_div(float64, float64, float_status *status);
36
float64 float64_rem(float64, float64, float_status *status);
37
float64 float64_muladd(float64, float64, float64, int, float_status *status);
38
+float64 float64_muladd_scalbn(float64, float64, float64,
39
+ int, int, float_status *status);
40
float64 float64_sqrt(float64, float_status *status);
41
float64 float64_log2(float64, float_status *status);
42
FloatRelation float64_compare(float64, float64, float_status *status);
43
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/fpu/softfloat.c
46
+++ b/fpu/softfloat.c
47
@@ -XXX,XX +XXX,XX @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
48
#define parts_mul(A, B, S) \
49
PARTS_GENERIC_64_128(mul, A)(A, B, S)
50
51
-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
52
- FloatParts64 *c, int flags,
53
- float_status *s);
54
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
55
- FloatParts128 *c, int flags,
56
- float_status *s);
57
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
58
+ FloatParts64 *c, int scale,
59
+ int flags, float_status *s);
60
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
61
+ FloatParts128 *c, int scale,
62
+ int flags, float_status *s);
63
64
-#define parts_muladd(A, B, C, Z, S) \
65
- PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
66
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
67
+ PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)
68
69
static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
70
float_status *s);
71
@@ -XXX,XX +XXX,XX @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
72
* Fused multiply-add
73
*/
74
75
-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
76
- int flags, float_status *status)
77
+float16 QEMU_FLATTEN
78
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
79
+ int scale, int flags, float_status *status)
80
{
81
FloatParts64 pa, pb, pc, *pr;
82
83
float16_unpack_canonical(&pa, a, status);
84
float16_unpack_canonical(&pb, b, status);
85
float16_unpack_canonical(&pc, c, status);
86
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
87
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
88
89
return float16_round_pack_canonical(pr, status);
90
}
91
92
-static float32 QEMU_SOFTFLOAT_ATTR
93
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
94
- float_status *status)
95
+float16 float16_muladd(float16 a, float16 b, float16 c,
96
+ int flags, float_status *status)
97
+{
98
+ return float16_muladd_scalbn(a, b, c, 0, flags, status);
99
+}
100
+
101
+float32 QEMU_SOFTFLOAT_ATTR
102
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
103
+ int scale, int flags, float_status *status)
104
{
105
FloatParts64 pa, pb, pc, *pr;
106
107
float32_unpack_canonical(&pa, a, status);
108
float32_unpack_canonical(&pb, b, status);
109
float32_unpack_canonical(&pc, c, status);
110
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
111
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
112
113
return float32_round_pack_canonical(pr, status);
114
}
115
116
-static float64 QEMU_SOFTFLOAT_ATTR
117
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
118
- float_status *status)
119
+float64 QEMU_SOFTFLOAT_ATTR
120
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
121
+ int scale, int flags, float_status *status)
122
{
123
FloatParts64 pa, pb, pc, *pr;
124
125
float64_unpack_canonical(&pa, a, status);
126
float64_unpack_canonical(&pb, b, status);
127
float64_unpack_canonical(&pc, c, status);
128
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
129
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
130
131
return float64_round_pack_canonical(pr, status);
132
}
133
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
134
return ur.s;
135
136
soft:
137
- return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
138
+ return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
139
}
140
141
float64 QEMU_FLATTEN
142
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
143
return ur.s;
144
145
soft:
146
- return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
147
+ return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
148
}
149
150
float64 float64r32_muladd(float64 a, float64 b, float64 c,
151
@@ -XXX,XX +XXX,XX @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
152
float64_unpack_canonical(&pa, a, status);
153
float64_unpack_canonical(&pb, b, status);
154
float64_unpack_canonical(&pc, c, status);
155
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
156
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
157
158
return float64r32_round_pack_canonical(pr, status);
159
}
160
@@ -XXX,XX +XXX,XX @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
161
bfloat16_unpack_canonical(&pa, a, status);
162
bfloat16_unpack_canonical(&pb, b, status);
163
bfloat16_unpack_canonical(&pc, c, status);
164
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
165
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
166
167
return bfloat16_round_pack_canonical(pr, status);
168
}
169
@@ -XXX,XX +XXX,XX @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
170
float128_unpack_canonical(&pa, a, status);
171
float128_unpack_canonical(&pb, b, status);
172
float128_unpack_canonical(&pc, c, status);
173
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
174
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
175
176
return float128_round_pack_canonical(pr, status);
177
}
178
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
179
180
float64_unpack_canonical(&rp, float64_one, status);
181
for (i = 0 ; i < 15 ; i++) {
182
+
183
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
184
- rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
185
+ rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
186
xnp = *parts_mul(&xnp, &xp, status);
187
}
188
189
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
190
index XXXXXXX..XXXXXXX 100644
191
--- a/fpu/softfloat-parts.c.inc
192
+++ b/fpu/softfloat-parts.c.inc
193
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
194
* Requires A and C extracted into a double-sized structure to provide the
195
* extra space for the widening multiply.
196
*/
197
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
198
- FloatPartsN *c, int flags, float_status *s)
199
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
200
+ FloatPartsN *c, int scale,
201
+ int flags, float_status *s)
202
{
203
int ab_mask, abc_mask;
204
FloatPartsW p_widen, c_widen;
205
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
206
a->exp = p_widen.exp;
207
208
return_normal:
209
+ /* TODO: Replace all use of float_muladd_halve_result with scale. */
210
if (flags & float_muladd_halve_result) {
211
a->exp -= 1;
212
}
213
+ a->exp += scale;
214
finish_sign:
215
if (flags & float_muladd_negate_result) {
216
a->sign ^= 1;
217
--
218
2.43.0
219
220
diff view generated by jsdifflib
1
The out-of-line function pointer is mutually exclusive
1
Use the scalbn interface instead of float_muladd_halve_result.
2
with inline expansion, so move it into the union.
3
Wrap the pointer in a structure named 'regular' to match
4
PLUGIN_CB_REGULAR.
5
2
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
5
---
10
include/qemu/plugin.h | 4 +++-
6
target/arm/tcg/helper-a64.c | 6 +++---
11
accel/tcg/plugin-gen.c | 4 ++--
7
1 file changed, 3 insertions(+), 3 deletions(-)
12
plugins/core.c | 8 ++++----
13
3 files changed, 9 insertions(+), 7 deletions(-)
14
8
15
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
9
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
16
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
17
--- a/include/qemu/plugin.h
11
--- a/target/arm/tcg/helper-a64.c
18
+++ b/include/qemu/plugin.h
12
+++ b/target/arm/tcg/helper-a64.c
19
@@ -XXX,XX +XXX,XX @@ enum plugin_dyn_cb_subtype {
13
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst)
20
* instance of a callback to be called upon the execution of a particular TB.
14
(float16_is_infinity(b) && float16_is_zero(a))) {
21
*/
15
return float16_one_point_five;
22
struct qemu_plugin_dyn_cb {
23
- union qemu_plugin_cb_sig f;
24
void *userp;
25
enum plugin_dyn_cb_subtype type;
26
/* @rw applies to mem callbacks only (both regular and inline) */
27
enum qemu_plugin_mem_rw rw;
28
/* fields specific to each dyn_cb type go here */
29
union {
30
+ struct {
31
+ union qemu_plugin_cb_sig f;
32
+ } regular;
33
struct {
34
qemu_plugin_u64 entry;
35
enum qemu_plugin_op op;
36
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
37
index XXXXXXX..XXXXXXX 100644
38
--- a/accel/tcg/plugin-gen.c
39
+++ b/accel/tcg/plugin-gen.c
40
@@ -XXX,XX +XXX,XX @@ static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
41
}
16
}
42
17
- return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
43
/* call */
18
+ return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);
44
- op = copy_call(&begin_op, op, cb->f.vcpu_udata, cb_idx);
45
+ op = copy_call(&begin_op, op, cb->regular.f.vcpu_udata, cb_idx);
46
47
return op;
48
}
19
}
49
@@ -XXX,XX +XXX,XX @@ static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb,
20
50
21
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
51
if (type == PLUGIN_GEN_CB_MEM) {
22
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
52
/* call */
23
(float32_is_infinity(b) && float32_is_zero(a))) {
53
- op = copy_call(&begin_op, op, cb->f.vcpu_udata, cb_idx);
24
return float32_one_point_five;
54
+ op = copy_call(&begin_op, op, cb->regular.f.vcpu_udata, cb_idx);
55
}
25
}
56
26
- return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
57
return op;
27
+ return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst);
58
diff --git a/plugins/core.c b/plugins/core.c
59
index XXXXXXX..XXXXXXX 100644
60
--- a/plugins/core.c
61
+++ b/plugins/core.c
62
@@ -XXX,XX +XXX,XX @@ void plugin_register_dyn_cb__udata(GArray **arr,
63
64
dyn_cb->userp = udata;
65
/* Note flags are discarded as unused. */
66
- dyn_cb->f.vcpu_udata = cb;
67
+ dyn_cb->regular.f.vcpu_udata = cb;
68
dyn_cb->type = PLUGIN_CB_REGULAR;
69
}
28
}
70
29
71
@@ -XXX,XX +XXX,XX @@ void plugin_register_vcpu_mem_cb(GArray **arr,
30
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
72
/* Note flags are discarded as unused. */
31
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
73
dyn_cb->type = PLUGIN_CB_REGULAR;
32
(float64_is_infinity(b) && float64_is_zero(a))) {
74
dyn_cb->rw = rw;
33
return float64_one_point_five;
75
- dyn_cb->f.generic = cb;
34
}
76
+ dyn_cb->regular.f.vcpu_mem = cb;
35
- return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
36
+ return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
77
}
37
}
78
38
79
/*
39
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
80
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
81
}
82
switch (cb->type) {
83
case PLUGIN_CB_REGULAR:
84
- cb->f.vcpu_mem(cpu->cpu_index, make_plugin_meminfo(oi, rw),
85
- vaddr, cb->userp);
86
+ cb->regular.f.vcpu_mem(cpu->cpu_index, make_plugin_meminfo(oi, rw),
87
+ vaddr, cb->userp);
88
break;
89
case PLUGIN_CB_INLINE:
90
exec_inline_op(cb, cpu->cpu_index);
91
--
40
--
92
2.34.1
41
2.43.0
93
42
94
43
New patch
1
1
Use the scalbn interface instead of float_muladd_halve_result.
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/sparc/helper.h | 4 +-
7
target/sparc/fop_helper.c | 8 ++--
8
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
9
3 files changed, 54 insertions(+), 38 deletions(-)
10
11
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/sparc/helper.h
14
+++ b/target/sparc/helper.h
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
16
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
17
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
18
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
19
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
20
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
21
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
22
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
23
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
25
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
26
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
27
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
28
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
29
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
30
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
31
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
32
33
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/sparc/fop_helper.c
36
+++ b/target/sparc/fop_helper.c
37
@@ -XXX,XX +XXX,XX @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
38
}
39
40
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
41
- float32 s2, float32 s3, uint32_t op)
42
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
43
{
44
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
45
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
46
check_ieee_exceptions(env, GETPC());
47
return ret;
48
}
49
50
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
51
- float64 s2, float64 s3, uint32_t op)
52
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
53
{
54
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
55
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
56
check_ieee_exceptions(env, GETPC());
57
return ret;
58
}
59
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/sparc/translate.c
62
+++ b/target/sparc/translate.c
63
@@ -XXX,XX +XXX,XX @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
64
65
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
66
{
67
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
68
+ TCGv_i32 z = tcg_constant_i32(0);
69
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
70
}
71
72
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
73
{
74
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
75
+ TCGv_i32 z = tcg_constant_i32(0);
76
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
77
}
78
79
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
80
{
81
- int op = float_muladd_negate_c;
82
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
83
+ TCGv_i32 z = tcg_constant_i32(0);
84
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
85
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
86
}
87
88
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
89
{
90
- int op = float_muladd_negate_c;
91
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
92
+ TCGv_i32 z = tcg_constant_i32(0);
93
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
94
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
95
}
96
97
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
98
{
99
- int op = float_muladd_negate_c | float_muladd_negate_result;
100
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
101
+ TCGv_i32 z = tcg_constant_i32(0);
102
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
103
+ float_muladd_negate_result);
104
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
105
}
106
107
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
108
{
109
- int op = float_muladd_negate_c | float_muladd_negate_result;
110
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
111
+ TCGv_i32 z = tcg_constant_i32(0);
112
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
113
+ float_muladd_negate_result);
114
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
115
}
116
117
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
118
{
119
- int op = float_muladd_negate_result;
120
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
121
+ TCGv_i32 z = tcg_constant_i32(0);
122
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
123
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
124
}
125
126
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
127
{
128
- int op = float_muladd_negate_result;
129
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
130
+ TCGv_i32 z = tcg_constant_i32(0);
131
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
132
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
133
}
134
135
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
136
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
137
{
138
- TCGv_i32 one = tcg_constant_i32(float32_one);
139
- int op = float_muladd_halve_result;
140
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
141
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
142
+ TCGv_i32 mone = tcg_constant_i32(-1);
143
+ TCGv_i32 op = tcg_constant_i32(0);
144
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
145
}
146
147
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
148
{
149
- TCGv_i64 one = tcg_constant_i64(float64_one);
150
- int op = float_muladd_halve_result;
151
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
152
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
153
+ TCGv_i32 mone = tcg_constant_i32(-1);
154
+ TCGv_i32 op = tcg_constant_i32(0);
155
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
156
}
157
158
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
159
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
160
{
161
- TCGv_i32 one = tcg_constant_i32(float32_one);
162
- int op = float_muladd_negate_c | float_muladd_halve_result;
163
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
164
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
165
+ TCGv_i32 mone = tcg_constant_i32(-1);
166
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
167
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
168
}
169
170
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
171
{
172
- TCGv_i64 one = tcg_constant_i64(float64_one);
173
- int op = float_muladd_negate_c | float_muladd_halve_result;
174
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
175
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
176
+ TCGv_i32 mone = tcg_constant_i32(-1);
177
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
178
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
179
}
180
181
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
182
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
183
{
184
- TCGv_i32 one = tcg_constant_i32(float32_one);
185
- int op = float_muladd_negate_result | float_muladd_halve_result;
186
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
187
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
188
+ TCGv_i32 mone = tcg_constant_i32(-1);
189
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
190
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
191
}
192
193
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
194
{
195
- TCGv_i64 one = tcg_constant_i64(float64_one);
196
- int op = float_muladd_negate_result | float_muladd_halve_result;
197
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
198
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
199
+ TCGv_i32 mone = tcg_constant_i32(-1);
200
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
201
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
202
}
203
204
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
205
--
206
2.43.0
207
208
1
Move MAX_CALL_IARGS from tcg.h and include for
1
All uses have been converted to float*_muladd_scalbn.
2
the define of TCG_TARGET_REG_BITS.
3
2
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
include/tcg/helper-info.h | 3 +++
6
include/fpu/softfloat.h | 3 ---
9
include/tcg/tcg.h | 2 --
7
fpu/softfloat.c | 6 ------
10
tcg/tci.c | 1 +
8
fpu/softfloat-parts.c.inc | 4 ----
11
3 files changed, 4 insertions(+), 2 deletions(-)
9
3 files changed, 13 deletions(-)
12
10
13
diff --git a/include/tcg/helper-info.h b/include/tcg/helper-info.h
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/helper-info.h
13
--- a/include/fpu/softfloat.h
16
+++ b/include/tcg/helper-info.h
14
+++ b/include/fpu/softfloat.h
17
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
18
#ifdef CONFIG_TCG_INTERPRETER
16
| Using these differs from negating an input or output before calling
19
#include <ffi.h>
17
| the muladd function in that this means that a NaN doesn't have its
20
#endif
18
| sign bit inverted before it is propagated.
21
+#include "tcg-target-reg-bits.h"
19
-| We also support halving the result before rounding, as a special
22
+
20
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
23
+#define MAX_CALL_IARGS 7
21
*----------------------------------------------------------------------------*/
24
22
enum {
25
/*
23
float_muladd_negate_c = 1,
26
* Describe the calling convention of a given argument type.
24
float_muladd_negate_product = 2,
27
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
25
float_muladd_negate_result = 4,
26
- float_muladd_halve_result = 8,
27
};
28
29
/*----------------------------------------------------------------------------
30
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
28
index XXXXXXX..XXXXXXX 100644
31
index XXXXXXX..XXXXXXX 100644
29
--- a/include/tcg/tcg.h
32
--- a/fpu/softfloat.c
30
+++ b/include/tcg/tcg.h
33
+++ b/fpu/softfloat.c
31
@@ -XXX,XX +XXX,XX @@
34
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
32
/* XXX: make safe guess about sizes */
35
if (unlikely(!can_use_fpu(s))) {
33
#define MAX_OP_PER_INSTR 266
36
goto soft;
34
37
}
35
-#define MAX_CALL_IARGS 7
38
- if (unlikely(flags & float_muladd_halve_result)) {
36
-
39
- goto soft;
37
#define CPU_TEMP_BUF_NLONGS 128
40
- }
38
#define TCG_STATIC_FRAME_SIZE (CPU_TEMP_BUF_NLONGS * sizeof(long))
41
39
42
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
40
diff --git a/tcg/tci.c b/tcg/tci.c
43
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
44
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
45
if (unlikely(!can_use_fpu(s))) {
46
goto soft;
47
}
48
- if (unlikely(flags & float_muladd_halve_result)) {
49
- goto soft;
50
- }
51
52
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
53
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
54
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
41
index XXXXXXX..XXXXXXX 100644
55
index XXXXXXX..XXXXXXX 100644
42
--- a/tcg/tci.c
56
--- a/fpu/softfloat-parts.c.inc
43
+++ b/tcg/tci.c
57
+++ b/fpu/softfloat-parts.c.inc
44
@@ -XXX,XX +XXX,XX @@
58
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
45
59
a->exp = p_widen.exp;
46
#include "qemu/osdep.h"
60
47
#include "tcg/tcg.h"
61
return_normal:
48
+#include "tcg/helper-info.h"
62
- /* TODO: Replace all use of float_muladd_halve_result with scale. */
49
#include "tcg/tcg-ldst.h"
63
- if (flags & float_muladd_halve_result) {
50
#include <ffi.h>
64
- a->exp -= 1;
51
65
- }
66
a->exp += scale;
67
finish_sign:
68
if (flags & float_muladd_negate_result) {
52
--
69
--
53
2.34.1
70
2.43.0
54
71
55
72
New patch
1
This rounding mode is used by Hexagon.
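For context, a hedged usage sketch (fs is a placeholder float_status;
0x7f7fffff is the largest normal float32): rounding behaves as round to
nearest even, but an overflow saturates to the maximum normal value instead
of producing infinity.

    /* Illustration only, assuming default float_status settings. */
    float_status fs = { 0 };
    set_float_rounding_mode(float_round_nearest_even_max, &fs);
    float32 r = float32_add(make_float32(0x7f7fffff),
                            make_float32(0x7f7fffff), &fs);
    /* r is expected to be the maximum normal, not +infinity. */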
1
2
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
include/fpu/softfloat-types.h | 2 ++
6
fpu/softfloat-parts.c.inc | 3 +++
7
2 files changed, 5 insertions(+)
8
9
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
10
index XXXXXXX..XXXXXXX 100644
11
--- a/include/fpu/softfloat-types.h
12
+++ b/include/fpu/softfloat-types.h
13
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
14
float_round_to_odd = 5,
15
/* Not an IEEE rounding mode: round to closest odd, overflow to inf */
16
float_round_to_odd_inf = 6,
17
+ /* Not an IEEE rounding mode: round to nearest even, overflow to max */
18
+ float_round_nearest_even_max = 7,
19
} FloatRoundMode;
20
21
/*
22
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
23
index XXXXXXX..XXXXXXX 100644
24
--- a/fpu/softfloat-parts.c.inc
25
+++ b/fpu/softfloat-parts.c.inc
26
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
27
int exp, flags = 0;
28
29
switch (s->float_rounding_mode) {
30
+ case float_round_nearest_even_max:
31
+ overflow_norm = true;
32
+ /* fall through */
33
case float_round_nearest_even:
34
if (N > 64 && frac_lsb == 0) {
35
inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
36
--
37
2.43.0
diff view generated by jsdifflib
New patch
1
Certain Hexagon instructions suppress changes to the result
2
when the product of fma() is a true zero.
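A rough sketch of the behaviour the new flag selects (fs is a placeholder
float_status; values chosen for illustration): when the product is a true
zero, the addend is returned with its own sign instead of going through the
usual signed-zero addition rules.

    /* Illustration: product 0 * 1 is a true zero, addend is -0.0. */
    float32 c = make_float32(0x80000000);  /* -0.0 */
    float32 r = float32_muladd(float32_zero, float32_one, c,
                               float_muladd_suppress_add_product_zero, &fs);
    /* Plain muladd gives +0.0 here (+0 + -0 rounds to +0);
       with the flag, r is expected to be c, i.e. -0.0. */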
1
3
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/fpu/softfloat.h | 5 +++++
7
fpu/softfloat.c | 3 +++
8
fpu/softfloat-parts.c.inc | 4 +++-
9
3 files changed, 11 insertions(+), 1 deletion(-)
10
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/include/fpu/softfloat.h
14
+++ b/include/fpu/softfloat.h
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
16
| Using these differs from negating an input or output before calling
17
| the muladd function in that this means that a NaN doesn't have its
18
| sign bit inverted before it is propagated.
19
+|
20
+| With float_muladd_suppress_add_product_zero, if A or B is zero
21
+| such that the product is a true zero, then return C without addition.
22
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
23
*----------------------------------------------------------------------------*/
24
enum {
25
float_muladd_negate_c = 1,
26
float_muladd_negate_product = 2,
27
float_muladd_negate_result = 4,
28
+ float_muladd_suppress_add_product_zero = 8,
29
};
30
31
/*----------------------------------------------------------------------------
32
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/fpu/softfloat.c
35
+++ b/fpu/softfloat.c
36
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
37
if (unlikely(!can_use_fpu(s))) {
38
goto soft;
39
}
40
+ if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
41
+ goto soft;
42
+ }
43
44
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
45
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
46
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
47
index XXXXXXX..XXXXXXX 100644
48
--- a/fpu/softfloat-parts.c.inc
49
+++ b/fpu/softfloat-parts.c.inc
50
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
51
goto return_normal;
52
}
53
if (c->cls == float_class_zero) {
54
- if (a->sign != c->sign) {
55
+ if (flags & float_muladd_suppress_add_product_zero) {
56
+ a->sign = c->sign;
57
+ } else if (a->sign != c->sign) {
58
goto return_sub_zero;
59
}
60
goto return_zero;
61
--
62
2.43.0
New patch
1
There are no special cases for this instruction.
2
Remove internal_mpyf as unused.
1
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/hexagon/fma_emu.h | 1 -
8
target/hexagon/fma_emu.c | 8 --------
9
target/hexagon/op_helper.c | 2 +-
10
3 files changed, 1 insertion(+), 10 deletions(-)
11
12
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/hexagon/fma_emu.h
15
+++ b/target/hexagon/fma_emu.h
16
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32);
17
float32 infinite_float32(uint8_t sign);
18
float32 internal_fmafx(float32 a, float32 b, float32 c,
19
int scale, float_status *fp_status);
20
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
21
float64 internal_mpyhh(float64 a, float64 b,
22
unsigned long long int accumulated,
23
float_status *fp_status);
24
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/hexagon/fma_emu.c
27
+++ b/target/hexagon/fma_emu.c
28
@@ -XXX,XX +XXX,XX @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
29
return accum_round_float32(result, fp_status);
30
}
31
32
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
33
-{
34
- if (float32_is_zero(a) || float32_is_zero(b)) {
35
- return float32_mul(a, b, fp_status);
36
- }
37
- return internal_fmafx(a, b, float32_zero, 0, fp_status);
38
-}
39
-
40
float64 internal_mpyhh(float64 a, float64 b,
41
unsigned long long int accumulated,
42
float_status *fp_status)
43
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/hexagon/op_helper.c
46
+++ b/target/hexagon/op_helper.c
47
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
48
{
49
float32 RdV;
50
arch_fpop_start(env);
51
- RdV = internal_mpyf(RsV, RtV, &env->fp_status);
52
+ RdV = float32_mul(RsV, RtV, &env->fp_status);
53
arch_fpop_end(env);
54
return RdV;
55
}
56
--
57
2.43.0
New patch
1
There are no special cases for this instruction.
1
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/hexagon/op_helper.c | 2 +-
7
1 file changed, 1 insertion(+), 1 deletion(-)
8
9
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/hexagon/op_helper.c
12
+++ b/target/hexagon/op_helper.c
13
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
14
float32 RsV, float32 RtV)
15
{
16
arch_fpop_start(env);
17
- RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
18
+ RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
19
arch_fpop_end(env);
20
return RxV;
21
}
22
--
23
2.43.0
New patch
1
There are no special cases for this instruction. Since Hexagon
2
always uses default-nan mode, explicitly negating the first
3
input is unnecessary. Use float_muladd_negate_product instead.
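For reference, a sketch of the equivalence relied on here (a, b, c and fs
are placeholders, not from the patch): negating the product inside the fused
operation matches negating the first operand beforehand, apart from the sign
of a propagated NaN, which default-nan mode discards anyway.

    /* Illustration only; in default-nan mode these produce identical
       results. */
    float32 r1 = float32_muladd(float32_chs(a), b, c, 0, &fs);
    float32 r2 = float32_muladd(a, b, c, float_muladd_negate_product, &fs);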
1
4
5
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/hexagon/op_helper.c | 5 ++---
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
11
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/hexagon/op_helper.c
14
+++ b/target/hexagon/op_helper.c
15
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
16
float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
17
float32 RsV, float32 RtV)
18
{
19
- float32 neg_RsV;
20
arch_fpop_start(env);
21
- neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
22
- RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
23
+ RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
24
+ &env->fp_status);
25
arch_fpop_end(env);
26
return RxV;
27
}
28
--
29
2.43.0
New patch
1
This instruction has a special case that 0 * x + c returns c
2
without the normal sign folding that comes with 0 + -0.
3
Use the new float_muladd_suppress_add_product_zero to
4
describe this.
1
5
6
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
target/hexagon/op_helper.c | 11 +++--------
10
1 file changed, 3 insertions(+), 8 deletions(-)
11
12
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/hexagon/op_helper.c
15
+++ b/target/hexagon/op_helper.c
16
@@ -XXX,XX +XXX,XX @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
17
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
18
float32 RsV, float32 RtV, float32 PuV)
19
{
20
- size4s_t tmp;
21
arch_fpop_start(env);
22
- RxV = check_nan(RxV, RxV, &env->fp_status);
23
- RxV = check_nan(RxV, RsV, &env->fp_status);
24
- RxV = check_nan(RxV, RtV, &env->fp_status);
25
- tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
26
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
27
- RxV = tmp;
28
- }
29
+ RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
30
+ float_muladd_suppress_add_product_zero,
31
+ &env->fp_status);
32
arch_fpop_end(env);
33
return RxV;
34
}
35
--
36
2.43.0
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
1
There are multiple special cases for this instruction.
2
(1) Saturating to the normal maximum instead of overflowing to infinity is
3
handled by the new float_round_nearest_even_max rounding mode.
4
(2) The 0 * n + c special case is handled by the new
5
float_muladd_suppress_add_product_zero flag.
6
(3) The Inf - Inf -> 0 special case can be detected after the fact
7
by examining float_flag_invalid_isi.
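A brief sketch of point (3), mirroring the do_sffma_lib helper added below
(a, b, c and fs are placeholders):

    /* Check for the invalid "infinity - infinity" case after the fact. */
    set_float_exception_flags(0, &fs);
    float32 r = float32_muladd(a, b, c, 0, &fs);
    if (get_float_exception_flags(&fs) & float_flag_invalid_isi) {
        r = float32_zero;  /* Inf - Inf yields 0 for these instructions. */
    }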
8
9
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
11
---
4
include/qemu/plugin.h | 1 -
12
target/hexagon/op_helper.c | 105 +++++++++----------------------------
5
accel/tcg/plugin-gen.c | 286 ++++++++++-------------------------------
13
1 file changed, 26 insertions(+), 79 deletions(-)
6
plugins/api.c | 8 +-
7
3 files changed, 67 insertions(+), 228 deletions(-)
8
14
9
diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h
15
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
10
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
11
--- a/include/qemu/plugin.h
17
--- a/target/hexagon/op_helper.c
12
+++ b/include/qemu/plugin.h
18
+++ b/target/hexagon/op_helper.c
13
@@ -XXX,XX +XXX,XX @@ enum plugin_dyn_cb_type {
19
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
14
20
return RxV;
15
enum plugin_dyn_cb_subtype {
21
}
16
PLUGIN_CB_REGULAR,
22
17
- PLUGIN_CB_REGULAR_R,
23
-static bool is_zero_prod(float32 a, float32 b)
18
PLUGIN_CB_INLINE,
19
PLUGIN_N_CB_SUBTYPES,
20
};
21
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/accel/tcg/plugin-gen.c
24
+++ b/accel/tcg/plugin-gen.c
25
@@ -XXX,XX +XXX,XX @@ void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
26
void *userdata)
27
{ }
28
29
-static void gen_empty_udata_cb(void (*gen_helper)(TCGv_i32, TCGv_ptr))
30
-{
24
-{
31
- TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
25
- return ((float32_is_zero(a) && is_finite(b)) ||
32
- TCGv_ptr udata = tcg_temp_ebb_new_ptr();
26
- (float32_is_zero(b) && is_finite(a)));
33
-
34
- tcg_gen_movi_ptr(udata, 0);
35
- tcg_gen_ld_i32(cpu_index, tcg_env,
36
- -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
37
- gen_helper(cpu_index, udata);
38
-
39
- tcg_temp_free_ptr(udata);
40
- tcg_temp_free_i32(cpu_index);
41
-}
27
-}
42
-
28
-
43
-static void gen_empty_udata_cb_no_wg(void)
29
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
44
-{
30
-{
45
- gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_wg);
31
- float32 ret = dst;
32
- if (float32_is_any_nan(x)) {
33
- if (extract32(x, 22, 1) == 0) {
34
- float_raise(float_flag_invalid, fp_status);
35
- }
36
- ret = make_float32(0xffffffff); /* nan */
37
- }
38
- return ret;
46
-}
39
-}
47
-
40
-
48
-static void gen_empty_udata_cb_no_rwg(void)
41
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
49
-{
42
float32 RsV, float32 RtV, float32 PuV)
50
- gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_rwg);
43
{
51
-}
44
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
52
-
45
return RxV;
53
/*
54
* For now we only support addi_i64.
55
* When we support more ops, we can generate one empty inline cb for each.
56
@@ -XXX,XX +XXX,XX @@ static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
57
tcg_temp_free_i32(cpu_index);
58
}
46
}
59
47
60
-/*
48
-static bool is_inf_prod(int32_t a, int32_t b)
61
- * Share the same function for enable/disable. When enabling, the NULL
49
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
62
- * pointer will be overwritten later.
50
+ float32 RsV, float32 RtV, int negate)
63
- */
64
-static void gen_empty_mem_helper(void)
65
-{
66
- TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
67
-
68
- tcg_gen_movi_ptr(ptr, 0);
69
- tcg_gen_st_ptr(ptr, tcg_env, offsetof(CPUState, plugin_mem_cbs) -
70
- offsetof(ArchCPU, env));
71
- tcg_temp_free_ptr(ptr);
72
-}
73
-
74
static void gen_plugin_cb_start(enum plugin_gen_from from,
75
enum plugin_gen_cb type, unsigned wr)
76
{
51
{
77
tcg_gen_plugin_cb_start(from, type, wr);
52
- return (float32_is_infinity(a) && float32_is_infinity(b)) ||
78
}
53
- (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
79
54
- (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
80
-static void gen_wrapped(enum plugin_gen_from from,
55
+ int flags;
81
- enum plugin_gen_cb type, void (*func)(void))
82
-{
83
- gen_plugin_cb_start(from, type, 0);
84
- func();
85
- tcg_gen_plugin_cb_end();
86
-}
87
-
88
static void plugin_gen_empty_callback(enum plugin_gen_from from)
89
{
90
switch (from) {
91
case PLUGIN_GEN_AFTER_INSN:
92
case PLUGIN_GEN_FROM_TB:
93
- tcg_gen_plugin_cb(from);
94
- break;
95
case PLUGIN_GEN_FROM_INSN:
96
- /*
97
- * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
98
- * the first callback of an instruction
99
- */
100
- gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
101
- gen_empty_mem_helper);
102
- gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb_no_rwg);
103
- gen_wrapped(from, PLUGIN_GEN_CB_UDATA_R, gen_empty_udata_cb_no_wg);
104
- gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
105
+ tcg_gen_plugin_cb(from);
106
break;
107
default:
108
g_assert_not_reached();
109
@@ -XXX,XX +XXX,XX @@ static TCGOp *copy_mul_i32(TCGOp **begin_op, TCGOp *op, uint32_t v)
110
return op;
111
}
112
113
-static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op)
114
-{
115
- if (UINTPTR_MAX == UINT32_MAX) {
116
- /* st_i32 */
117
- op = copy_op(begin_op, op, INDEX_op_st_i32);
118
- } else {
119
- /* st_i64 */
120
- op = copy_st_i64(begin_op, op);
121
- }
122
- return op;
123
-}
124
-
125
static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *func, int *cb_idx)
126
{
127
TCGOp *old_op;
128
@@ -XXX,XX +XXX,XX @@ static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *func, int *cb_idx)
129
return op;
130
}
131
132
-/*
133
- * When we append/replace ops here we are sensitive to changing patterns of
134
- * TCGOps generated by the tcg_gen_FOO calls when we generated the
135
- * empty callbacks. This will assert very quickly in a debug build as
136
- * we assert the ops we are replacing are the correct ones.
137
- */
138
-static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb,
139
- TCGOp *begin_op, TCGOp *op, int *cb_idx)
140
-{
141
- /* const_ptr */
142
- op = copy_const_ptr(&begin_op, op, cb->userp);
143
-
144
- /* copy the ld_i32, but note that we only have to copy it once */
145
- if (*cb_idx == -1) {
146
- op = copy_op(&begin_op, op, INDEX_op_ld_i32);
147
- } else {
148
- begin_op = QTAILQ_NEXT(begin_op, link);
149
- tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32);
150
- }
151
-
152
- /* call */
153
- op = copy_call(&begin_op, op, cb->regular.f.vcpu_udata, cb_idx);
154
-
155
- return op;
156
-}
157
-
158
static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb,
159
TCGOp *begin_op, TCGOp *op,
160
int *unused)
161
@@ -XXX,XX +XXX,XX @@ typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb,
162
TCGOp *begin_op, TCGOp *op, int *intp);
163
typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb);
164
165
-static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
166
-{
167
- return true;
168
-}
169
-
170
static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb)
171
{
172
int w;
173
@@ -XXX,XX +XXX,XX @@ static void inject_cb_type(const GArray *cbs, TCGOp *begin_op,
174
rm_ops_range(begin_op, end_op);
175
}
176
177
-static void
178
-inject_udata_cb(const GArray *cbs, TCGOp *begin_op)
179
-{
180
- inject_cb_type(cbs, begin_op, append_udata_cb, op_ok);
181
-}
182
-
183
static void
184
inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok)
185
{
186
@@ -XXX,XX +XXX,XX @@ inject_mem_cb(const GArray *cbs, TCGOp *begin_op)
187
inject_cb_type(cbs, begin_op, append_mem_cb, op_rw);
188
}
189
190
-/* we could change the ops in place, but we can reuse more code by copying */
191
-static void inject_mem_helper(TCGOp *begin_op, GArray *arr)
192
-{
193
- TCGOp *orig_op = begin_op;
194
- TCGOp *end_op;
195
- TCGOp *op;
196
-
197
- end_op = find_op(begin_op, INDEX_op_plugin_cb_end);
198
- tcg_debug_assert(end_op);
199
-
200
- /* const ptr */
201
- op = copy_const_ptr(&begin_op, end_op, arr);
202
-
203
- /* st_ptr */
204
- op = copy_st_ptr(&begin_op, op);
205
-
206
- rm_ops_range(orig_op, end_op);
207
-}
208
-
209
-/*
210
- * Tracking memory accesses performed from helpers requires extra work.
211
- * If an instruction is emulated with helpers, we do two things:
212
- * (1) copy the CB descriptors, and keep track of it so that they can be
213
- * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so
214
- * that we can read them at run-time (i.e. when the helper executes).
215
- * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
216
- *
217
- * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
218
- * is possible that the code we generate after the instruction is
219
- * dead, we also add checks before generating tb_exit etc.
220
- */
221
-static void inject_mem_enable_helper(struct qemu_plugin_tb *ptb,
222
- struct qemu_plugin_insn *plugin_insn,
223
- TCGOp *begin_op)
224
-{
225
- GArray *cbs[2];
226
- GArray *arr;
227
- size_t n_cbs, i;
228
-
229
- cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
230
- cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
231
-
232
- n_cbs = 0;
233
- for (i = 0; i < ARRAY_SIZE(cbs); i++) {
234
- n_cbs += cbs[i]->len;
235
- }
236
-
237
- plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs;
238
- if (likely(!plugin_insn->mem_helper)) {
239
- rm_ops(begin_op);
240
- return;
241
- }
242
- ptb->mem_helper = true;
243
-
244
- arr = g_array_sized_new(false, false,
245
- sizeof(struct qemu_plugin_dyn_cb), n_cbs);
246
-
247
- for (i = 0; i < ARRAY_SIZE(cbs); i++) {
248
- g_array_append_vals(arr, cbs[i]->data, cbs[i]->len);
249
- }
250
-
251
- qemu_plugin_add_dyn_cb_arr(arr);
252
- inject_mem_helper(begin_op, arr);
253
-}
254
-
255
/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
256
void plugin_gen_disable_mem_helpers(void)
257
{
258
@@ -XXX,XX +XXX,XX @@ void plugin_gen_disable_mem_helpers(void)
259
}
260
}
261
262
-static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
263
- TCGOp *begin_op, int insn_idx)
264
-{
265
- struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
266
-
267
- inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
268
-}
269
-
270
-static void plugin_gen_insn_udata_r(const struct qemu_plugin_tb *ptb,
271
- TCGOp *begin_op, int insn_idx)
272
-{
273
- struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
274
-
275
- inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR_R], begin_op);
276
-}
277
-
278
-static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
279
- TCGOp *begin_op, int insn_idx)
280
-{
281
- struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
282
- inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
283
- begin_op, op_ok);
284
-}
285
-
286
static void plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb,
287
TCGOp *begin_op, int insn_idx)
288
{
289
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb,
290
inject_inline_cb(cbs, begin_op, op_rw);
291
}
292
293
-static void plugin_gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
294
- TCGOp *begin_op, int insn_idx)
295
+static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
296
+ struct qemu_plugin_insn *insn)
297
{
298
- struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
299
- inject_mem_enable_helper(ptb, insn, begin_op);
300
+ GArray *cbs[2];
301
+ GArray *arr;
302
+ size_t n_cbs;
303
+
56
+
304
+ /*
57
+ arch_fpop_start(env);
305
+ * Tracking memory accesses performed from helpers requires extra work.
58
+
306
+ * If an instruction is emulated with helpers, we do two things:
59
+ set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
307
+ * (1) copy the CB descriptors, and keep track of it so that they can be
60
+ RxV = float32_muladd(RsV, RtV, RxV,
308
+ * freed later on, and (2) point CPUState.plugin_mem_cbs to the
61
+ negate | float_muladd_suppress_add_product_zero,
309
+ * descriptors, so that we can read them at run-time
62
+ &env->fp_status);
310
+ * (i.e. when the helper executes).
63
+
311
+ * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
64
+ flags = get_float_exception_flags(&env->fp_status);
312
+ *
65
+ if (flags) {
313
+ * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
66
+ /* Flags are suppressed by this instruction. */
314
+ * is possible that the code we generate after the instruction is
67
+ set_float_exception_flags(0, &env->fp_status);
315
+ * dead, we also add checks before generating tb_exit etc.
68
+
316
+ */
69
+ /* Return 0 for Inf - Inf. */
317
+ if (!insn->calls_helpers) {
70
+ if (flags & float_flag_invalid_isi) {
318
+ return;
71
+ RxV = 0;
72
+ }
319
+ }
73
+ }
320
+
74
+
321
+ cbs[0] = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR];
75
+ arch_fpop_end(env);
322
+ cbs[1] = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE];
76
+ return RxV;
323
+ n_cbs = cbs[0]->len + cbs[1]->len;
324
+
325
+ if (n_cbs == 0) {
326
+ insn->mem_helper = false;
327
+ return;
328
+ }
329
+ insn->mem_helper = true;
330
+ ptb->mem_helper = true;
331
+
332
+ arr = g_array_sized_new(false, false,
333
+ sizeof(struct qemu_plugin_dyn_cb), n_cbs);
334
+ g_array_append_vals(arr, cbs[0]->data, cbs[0]->len);
335
+ g_array_append_vals(arr, cbs[1]->data, cbs[1]->len);
336
+
337
+ qemu_plugin_add_dyn_cb_arr(arr);
338
+
339
+ tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env,
340
+ offsetof(CPUState, plugin_mem_cbs) -
341
+ offsetof(ArchCPU, env));
342
}
77
}
343
78
344
static void gen_disable_mem_helper(void)
79
float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
345
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
80
float32 RsV, float32 RtV)
346
}
81
{
347
break;
82
- bool infinp;
348
83
- bool infminusinf;
349
+ case PLUGIN_GEN_FROM_INSN:
84
- float32 tmp;
350
+ assert(insn != NULL);
351
+
352
+ gen_enable_mem_helper(plugin_tb, insn);
353
+
354
+ cbs = insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR];
355
+ for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
356
+ struct qemu_plugin_dyn_cb *cb =
357
+ &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
358
+ gen_udata_cb(cb);
359
+ }
360
+
361
+ cbs = insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE];
362
+ for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
363
+ struct qemu_plugin_dyn_cb *cb =
364
+ &g_array_index(cbs, struct qemu_plugin_dyn_cb, i);
365
+ gen_inline_cb(cb);
366
+ }
367
+ break;
368
+
369
default:
370
g_assert_not_reached();
371
}
372
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
373
enum plugin_gen_cb type = op->args[1];
374
375
switch (from) {
376
- case PLUGIN_GEN_FROM_INSN:
377
- {
378
- g_assert(insn_idx >= 0);
379
-
85
-
380
- switch (type) {
86
- arch_fpop_start(env);
381
- case PLUGIN_GEN_CB_UDATA:
87
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
382
- plugin_gen_insn_udata(plugin_tb, op, insn_idx);
88
- infminusinf = float32_is_infinity(RxV) &&
383
- break;
89
- is_inf_prod(RsV, RtV) &&
384
- case PLUGIN_GEN_CB_UDATA_R:
90
- (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
385
- plugin_gen_insn_udata_r(plugin_tb, op, insn_idx);
91
- infinp = float32_is_infinity(RxV) ||
386
- break;
92
- float32_is_infinity(RtV) ||
387
- case PLUGIN_GEN_CB_INLINE:
93
- float32_is_infinity(RsV);
388
- plugin_gen_insn_inline(plugin_tb, op, insn_idx);
94
- RxV = check_nan(RxV, RxV, &env->fp_status);
389
- break;
95
- RxV = check_nan(RxV, RsV, &env->fp_status);
390
- case PLUGIN_GEN_ENABLE_MEM_HELPER:
96
- RxV = check_nan(RxV, RtV, &env->fp_status);
391
- plugin_gen_enable_mem_helper(plugin_tb, op, insn_idx);
97
- tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
392
- break;
98
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
393
- default:
99
- RxV = tmp;
394
- g_assert_not_reached();
100
- }
395
- }
101
- set_float_exception_flags(0, &env->fp_status);
396
- break;
102
- if (float32_is_infinity(RxV) && !infinp) {
397
- }
103
- RxV = RxV - 1;
398
case PLUGIN_GEN_FROM_MEM:
104
- }
399
{
105
- if (infminusinf) {
400
g_assert(insn_idx >= 0);
106
- RxV = 0;
401
diff --git a/plugins/api.c b/plugins/api.c
107
- }
402
index XXXXXXX..XXXXXXX 100644
108
- arch_fpop_end(env);
403
--- a/plugins/api.c
109
- return RxV;
404
+++ b/plugins/api.c
110
+ return do_sffma_lib(env, RxV, RsV, RtV, 0);
405
@@ -XXX,XX +XXX,XX @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
111
}
406
void *udata)
112
113
float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
114
float32 RsV, float32 RtV)
407
{
115
{
408
if (!insn->mem_only) {
116
- bool infinp;
409
- int index = flags == QEMU_PLUGIN_CB_R_REGS ||
117
- bool infminusinf;
410
- flags == QEMU_PLUGIN_CB_RW_REGS ?
118
- float32 tmp;
411
- PLUGIN_CB_REGULAR_R : PLUGIN_CB_REGULAR;
412
-
119
-
413
- plugin_register_dyn_cb__udata(&insn->cbs[PLUGIN_CB_INSN][index],
120
- arch_fpop_start(env);
414
- cb, flags, udata);
121
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
415
+ plugin_register_dyn_cb__udata(
122
- infminusinf = float32_is_infinity(RxV) &&
416
+ &insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], cb, flags, udata);
123
- is_inf_prod(RsV, RtV) &&
417
}
124
- (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
125
- infinp = float32_is_infinity(RxV) ||
126
- float32_is_infinity(RtV) ||
127
- float32_is_infinity(RsV);
128
- RxV = check_nan(RxV, RxV, &env->fp_status);
129
- RxV = check_nan(RxV, RsV, &env->fp_status);
130
- RxV = check_nan(RxV, RtV, &env->fp_status);
131
- float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
132
- tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
133
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
134
- RxV = tmp;
135
- }
136
- set_float_exception_flags(0, &env->fp_status);
137
- if (float32_is_infinity(RxV) && !infinp) {
138
- RxV = RxV - 1;
139
- }
140
- if (infminusinf) {
141
- RxV = 0;
142
- }
143
- arch_fpop_end(env);
144
- return RxV;
145
+ return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
418
}
146
}
419
147
148
float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
420
--
149
--
421
2.34.1
150
2.43.0
New patch
1
The internal_fmafx() function is now unused.
1
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/hexagon/fma_emu.h | 2 -
7
target/hexagon/fma_emu.c | 171 ---------------------------------------
8
2 files changed, 173 deletions(-)
9
10
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/hexagon/fma_emu.h
13
+++ b/target/hexagon/fma_emu.h
14
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float32_getexp_raw(float32 f32)
15
}
16
int32_t float32_getexp(float32 f32);
17
float32 infinite_float32(uint8_t sign);
18
-float32 internal_fmafx(float32 a, float32 b, float32 c,
19
- int scale, float_status *fp_status);
20
float64 internal_mpyhh(float64 a, float64 b,
21
unsigned long long int accumulated,
22
float_status *fp_status);
23
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
24
index XXXXXXX..XXXXXXX 100644
25
--- a/target/hexagon/fma_emu.c
26
+++ b/target/hexagon/fma_emu.c
27
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
28
return -1;
29
}
30
31
-static uint64_t float32_getmant(float32 f32)
32
-{
33
- Float a = { .i = f32 };
34
- if (float32_is_normal(f32)) {
35
- return a.mant | 1ULL << 23;
36
- }
37
- if (float32_is_zero(f32)) {
38
- return 0;
39
- }
40
- if (float32_is_denormal(f32)) {
41
- return a.mant;
42
- }
43
- return ~0ULL;
44
-}
45
-
46
int32_t float32_getexp(float32 f32)
47
{
48
Float a = { .i = f32 };
49
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
50
}
51
52
/* Return a maximum finite value with the requested sign */
53
-static float32 maxfinite_float32(uint8_t sign)
54
-{
55
- if (sign) {
56
- return make_float32(SF_MINUS_MAXF);
57
- } else {
58
- return make_float32(SF_MAXF);
59
- }
60
-}
61
-
62
-/* Return a zero value with requested sign */
63
-static float32 zero_float32(uint8_t sign)
64
-{
65
- if (sign) {
66
- return make_float32(0x80000000);
67
- } else {
68
- return float32_zero;
69
- }
70
-}
71
-
72
#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
73
static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
74
{ \
75
@@ -XXX,XX +XXX,XX @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
76
}
77
78
GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
79
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
80
-
81
-static bool is_inf_prod(float64 a, float64 b)
82
-{
83
- return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
84
- (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
85
- (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
86
-}
87
-
88
-static float64 special_fma(float64 a, float64 b, float64 c,
89
- float_status *fp_status)
90
-{
91
- float64 ret = make_float64(0);
92
-
93
- /*
94
- * If A multiplied by B is an exact infinity and C is also an infinity
95
- * but with the opposite sign, FMA returns NaN and raises invalid.
96
- */
97
- uint8_t a_sign = float64_is_neg(a);
98
- uint8_t b_sign = float64_is_neg(b);
99
- uint8_t c_sign = float64_is_neg(c);
100
- if (is_inf_prod(a, b) && float64_is_infinity(c)) {
101
- if ((a_sign ^ b_sign) != c_sign) {
102
- ret = make_float64(DF_NAN);
103
- float_raise(float_flag_invalid, fp_status);
104
- return ret;
105
- }
106
- }
107
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
108
- (float64_is_zero(a) && float64_is_infinity(b))) {
109
- ret = make_float64(DF_NAN);
110
- float_raise(float_flag_invalid, fp_status);
111
- return ret;
112
- }
113
- /*
114
- * If none of the above checks are true and C is a NaN,
115
- * a NaN shall be returned
116
- * If A or B are NaN, a NAN shall be returned.
117
- */
118
- if (float64_is_any_nan(a) ||
119
- float64_is_any_nan(b) ||
120
- float64_is_any_nan(c)) {
121
- if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
122
- float_raise(float_flag_invalid, fp_status);
123
- }
124
- if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
125
- float_raise(float_flag_invalid, fp_status);
126
- }
127
- if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
128
- float_raise(float_flag_invalid, fp_status);
129
- }
130
- ret = make_float64(DF_NAN);
131
- return ret;
132
- }
133
- /*
134
- * We have checked for adding opposite-signed infinities.
135
- * Other infinities return infinity with the correct sign
136
- */
137
- if (float64_is_infinity(c)) {
138
- ret = infinite_float64(c_sign);
139
- return ret;
140
- }
141
- if (float64_is_infinity(a) || float64_is_infinity(b)) {
142
- ret = infinite_float64(a_sign ^ b_sign);
143
- return ret;
144
- }
145
- g_assert_not_reached();
146
-}
147
-
148
-static float32 special_fmaf(float32 a, float32 b, float32 c,
149
- float_status *fp_status)
150
-{
151
- float64 aa, bb, cc;
152
- aa = float32_to_float64(a, fp_status);
153
- bb = float32_to_float64(b, fp_status);
154
- cc = float32_to_float64(c, fp_status);
155
- return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
156
-}
157
-
158
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
159
- float_status *fp_status)
160
-{
161
- Accum prod;
162
- Accum acc;
163
- Accum result;
164
- accum_init(&prod);
165
- accum_init(&acc);
166
- accum_init(&result);
167
-
168
- uint8_t a_sign = float32_is_neg(a);
169
- uint8_t b_sign = float32_is_neg(b);
170
- uint8_t c_sign = float32_is_neg(c);
171
- if (float32_is_infinity(a) ||
172
- float32_is_infinity(b) ||
173
- float32_is_infinity(c)) {
174
- return special_fmaf(a, b, c, fp_status);
175
- }
176
- if (float32_is_any_nan(a) ||
177
- float32_is_any_nan(b) ||
178
- float32_is_any_nan(c)) {
179
- return special_fmaf(a, b, c, fp_status);
180
- }
181
- if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
182
- float32 tmp = float32_mul(a, b, fp_status);
183
- tmp = float32_add(tmp, c, fp_status);
184
- return tmp;
185
- }
186
-
187
- /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
188
- prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
189
-
190
- /*
191
- * Note: extracting the mantissa into an int is multiplying by
192
- * 2**23, so adjust here
193
- */
194
- prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
195
- prod.sign = a_sign ^ b_sign;
196
- if (float32_is_zero(a) || float32_is_zero(b)) {
197
- prod.exp = -2 * WAY_BIG_EXP;
198
- }
199
- if ((scale > 0) && float32_is_denormal(c)) {
200
- acc.mant = int128_mul_6464(0, 0);
201
- acc.exp = -WAY_BIG_EXP;
202
- acc.sign = c_sign;
203
- acc.sticky = 1;
204
- result = accum_add(prod, acc);
205
- } else if (!float32_is_zero(c)) {
206
- acc.mant = int128_mul_6464(float32_getmant(c), 1);
207
- acc.exp = float32_getexp(c);
208
- acc.sign = c_sign;
209
- result = accum_add(prod, acc);
210
- } else {
211
- result = prod;
212
- }
213
- result.exp += scale;
214
- return accum_round_float32(result, fp_status);
215
-}
216
217
float64 internal_mpyhh(float64 a, float64 b,
218
unsigned long long int accumulated,
219
--
220
2.43.0
New patch
1
This massive macro is now only used once.
2
Expand it for use only by float64.
1
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/hexagon/fma_emu.c | 255 +++++++++++++++++++--------------------
8
1 file changed, 127 insertions(+), 128 deletions(-)
9
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/hexagon/fma_emu.c
13
+++ b/target/hexagon/fma_emu.c
14
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
15
}
16
17
/* Return a maximum finite value with the requested sign */
18
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
19
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
20
-{ \
21
- if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
22
- && ((a.guard | a.round | a.sticky) == 0)) { \
23
- /* result zero */ \
24
- switch (fp_status->float_rounding_mode) { \
25
- case float_round_down: \
26
- return zero_##SUFFIX(1); \
27
- default: \
28
- return zero_##SUFFIX(0); \
29
- } \
30
- } \
31
- /* Normalize right */ \
32
- /* We want MANTBITS bits of mantissa plus the leading one. */ \
33
- /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
34
- /* So we need to normalize right while the high word is non-zero and \
35
- * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
36
- while ((int128_gethi(a.mant) != 0) || \
37
- ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
38
- a = accum_norm_right(a, 1); \
39
- } \
40
- /* \
41
- * OK, now normalize left \
42
- * We want to normalize left until we have a leading one in bit 24 \
43
- * Theoretically, we only need to shift a maximum of one to the left if we \
44
- * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
45
- * should be 0 \
46
- */ \
47
- while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
48
- a = accum_norm_left(a); \
49
- } \
50
- /* \
51
- * OK, now we might need to denormalize because of potential underflow. \
52
- * We need to do this before rounding, and rounding might make us normal \
53
- * again \
54
- */ \
55
- while (a.exp <= 0) { \
56
- a = accum_norm_right(a, 1 - a.exp); \
57
- /* \
58
- * Do we have underflow? \
59
- * That's when we get an inexact answer because we ran out of bits \
60
- * in a denormal. \
61
- */ \
62
- if (a.guard || a.round || a.sticky) { \
63
- float_raise(float_flag_underflow, fp_status); \
64
- } \
65
- } \
66
- /* OK, we're relatively canonical... now we need to round */ \
67
- if (a.guard || a.round || a.sticky) { \
68
- float_raise(float_flag_inexact, fp_status); \
69
- switch (fp_status->float_rounding_mode) { \
70
- case float_round_to_zero: \
71
- /* Chop and we're done */ \
72
- break; \
73
- case float_round_up: \
74
- if (a.sign == 0) { \
75
- a.mant = int128_add(a.mant, int128_one()); \
76
- } \
77
- break; \
78
- case float_round_down: \
79
- if (a.sign != 0) { \
80
- a.mant = int128_add(a.mant, int128_one()); \
81
- } \
82
- break; \
83
- default: \
84
- if (a.round || a.sticky) { \
85
- /* round up if guard is 1, down if guard is zero */ \
86
- a.mant = int128_add(a.mant, int128_make64(a.guard)); \
87
- } else if (a.guard) { \
88
- /* exactly .5, round up if odd */ \
89
- a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
90
- } \
91
- break; \
92
- } \
93
- } \
94
- /* \
95
- * OK, now we might have carried all the way up. \
96
- * So we might need to shr once \
97
- * at least we know that the lsb should be zero if we rounded and \
98
- * got a carry out... \
99
- */ \
100
- if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
101
- a = accum_norm_right(a, 1); \
102
- } \
103
- /* Overflow? */ \
104
- if (a.exp >= INF_EXP) { \
105
- /* Yep, inf result */ \
106
- float_raise(float_flag_overflow, fp_status); \
107
- float_raise(float_flag_inexact, fp_status); \
108
- switch (fp_status->float_rounding_mode) { \
109
- case float_round_to_zero: \
110
- return maxfinite_##SUFFIX(a.sign); \
111
- case float_round_up: \
112
- if (a.sign == 0) { \
113
- return infinite_##SUFFIX(a.sign); \
114
- } else { \
115
- return maxfinite_##SUFFIX(a.sign); \
116
- } \
117
- case float_round_down: \
118
- if (a.sign != 0) { \
119
- return infinite_##SUFFIX(a.sign); \
120
- } else { \
121
- return maxfinite_##SUFFIX(a.sign); \
122
- } \
123
- default: \
124
- return infinite_##SUFFIX(a.sign); \
125
- } \
126
- } \
127
- /* Underflow? */ \
128
- if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
129
- /* Leading one means: No, we're normal. So, we should be done... */ \
130
- INTERNAL_TYPE ret; \
131
- ret.i = 0; \
132
- ret.sign = a.sign; \
133
- ret.exp = a.exp; \
134
- ret.mant = int128_getlo(a.mant); \
135
- return ret.i; \
136
- } \
137
- assert(a.exp == 1); \
138
- INTERNAL_TYPE ret; \
139
- ret.i = 0; \
140
- ret.sign = a.sign; \
141
- ret.exp = 0; \
142
- ret.mant = int128_getlo(a.mant); \
143
- return ret.i; \
144
+static float64 accum_round_float64(Accum a, float_status *fp_status)
145
+{
146
+ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
147
+ && ((a.guard | a.round | a.sticky) == 0)) {
148
+ /* result zero */
149
+ switch (fp_status->float_rounding_mode) {
150
+ case float_round_down:
151
+ return zero_float64(1);
152
+ default:
153
+ return zero_float64(0);
154
+ }
155
+ }
156
+ /*
157
+ * Normalize right
158
+ * We want DF_MANTBITS bits of mantissa plus the leading one.
159
+ * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
160
+ * So we need to normalize right while the high word is non-zero and
161
+ * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
162
+ */
163
+ while ((int128_gethi(a.mant) != 0) ||
164
+ ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
165
+ a = accum_norm_right(a, 1);
166
+ }
167
+ /*
168
+ * OK, now normalize left
169
+ * We want to normalize left until we have a leading one in bit 24
170
+ * Theoretically, we only need to shift a maximum of one to the left if we
171
+ * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
172
+ * should be 0
173
+ */
174
+ while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
175
+ a = accum_norm_left(a);
176
+ }
177
+ /*
178
+ * OK, now we might need to denormalize because of potential underflow.
179
+ * We need to do this before rounding, and rounding might make us normal
180
+ * again
181
+ */
182
+ while (a.exp <= 0) {
183
+ a = accum_norm_right(a, 1 - a.exp);
184
+ /*
185
+ * Do we have underflow?
186
+ * That's when we get an inexact answer because we ran out of bits
187
+ * in a denormal.
188
+ */
189
+ if (a.guard || a.round || a.sticky) {
190
+ float_raise(float_flag_underflow, fp_status);
191
+ }
192
+ }
193
+ /* OK, we're relatively canonical... now we need to round */
194
+ if (a.guard || a.round || a.sticky) {
195
+ float_raise(float_flag_inexact, fp_status);
196
+ switch (fp_status->float_rounding_mode) {
197
+ case float_round_to_zero:
198
+ /* Chop and we're done */
199
+ break;
200
+ case float_round_up:
201
+ if (a.sign == 0) {
202
+ a.mant = int128_add(a.mant, int128_one());
203
+ }
204
+ break;
205
+ case float_round_down:
206
+ if (a.sign != 0) {
207
+ a.mant = int128_add(a.mant, int128_one());
208
+ }
209
+ break;
210
+ default:
211
+ if (a.round || a.sticky) {
212
+ /* round up if guard is 1, down if guard is zero */
213
+ a.mant = int128_add(a.mant, int128_make64(a.guard));
214
+ } else if (a.guard) {
215
+ /* exactly .5, round up if odd */
216
+ a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
217
+ }
218
+ break;
219
+ }
220
+ }
221
+ /*
222
+ * OK, now we might have carried all the way up.
223
+ * So we might need to shr once
224
+ * at least we know that the lsb should be zero if we rounded and
225
+ * got a carry out...
226
+ */
227
+ if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
228
+ a = accum_norm_right(a, 1);
229
+ }
230
+ /* Overflow? */
231
+ if (a.exp >= DF_INF_EXP) {
232
+ /* Yep, inf result */
233
+ float_raise(float_flag_overflow, fp_status);
234
+ float_raise(float_flag_inexact, fp_status);
235
+ switch (fp_status->float_rounding_mode) {
236
+ case float_round_to_zero:
237
+ return maxfinite_float64(a.sign);
238
+ case float_round_up:
239
+ if (a.sign == 0) {
240
+ return infinite_float64(a.sign);
241
+ } else {
242
+ return maxfinite_float64(a.sign);
243
+ }
244
+ case float_round_down:
245
+ if (a.sign != 0) {
246
+ return infinite_float64(a.sign);
247
+ } else {
248
+ return maxfinite_float64(a.sign);
249
+ }
250
+ default:
251
+ return infinite_float64(a.sign);
252
+ }
253
+ }
254
+ /* Underflow? */
255
+ if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
256
+ /* Leading one means: No, we're normal. So, we should be done... */
257
+ Double ret;
258
+ ret.i = 0;
259
+ ret.sign = a.sign;
260
+ ret.exp = a.exp;
261
+ ret.mant = int128_getlo(a.mant);
262
+ return ret.i;
263
+ }
264
+ assert(a.exp == 1);
265
+ Double ret;
266
+ ret.i = 0;
267
+ ret.sign = a.sign;
268
+ ret.exp = 0;
269
+ ret.mant = int128_getlo(a.mant);
270
+ return ret.i;
271
}
272
273
-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
274
-
275
float64 internal_mpyhh(float64 a, float64 b,
276
unsigned long long int accumulated,
277
float_status *fp_status)
278
--
279
2.43.0
New patch
1
This structure, with bitfields, is incorrect on big-endian hosts.
2
Use the existing float32_getexp_raw, which uses extract32.
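
For illustration only (not part of the patch): the order in which bit-fields
are packed into a word is implementation-defined and differs between typical
little- and big-endian ABIs, so the Float union reads the wrong bits on
big-endian hosts. Shift-and-mask extraction is layout-independent; a minimal
sketch of the equivalent operation, with an illustrative name:

    /* IEEE single precision: sign[31] exponent[30:23] mantissa[22:0]. */
    static inline unsigned sketch_float32_exp_raw(uint32_t bits)
    {
        return (bits >> 23) & 0xff;   /* what extract32(bits, 23, 8) computes */
    }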
1
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/hexagon/fma_emu.c | 16 +++-------------
8
1 file changed, 3 insertions(+), 13 deletions(-)
9
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/hexagon/fma_emu.c
13
+++ b/target/hexagon/fma_emu.c
14
@@ -XXX,XX +XXX,XX @@ typedef union {
15
};
16
} Double;
17
18
-typedef union {
19
- float f;
20
- uint32_t i;
21
- struct {
22
- uint32_t mant:23;
23
- uint32_t exp:8;
24
- uint32_t sign:1;
25
- };
26
-} Float;
27
-
28
static uint64_t float64_getmant(float64 f64)
29
{
30
Double a = { .i = f64 };
31
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
32
33
int32_t float32_getexp(float32 f32)
34
{
35
- Float a = { .i = f32 };
36
+ int exp = float32_getexp_raw(f32);
37
if (float32_is_normal(f32)) {
38
- return a.exp;
39
+ return exp;
40
}
41
if (float32_is_denormal(f32)) {
42
- return a.exp + 1;
43
+ return exp + 1;
44
}
45
return -1;
46
}
47
--
48
2.43.0
New patch
1
This structure, with bitfields, is incorrect on big-endian hosts.
2
Use extract64 and deposit64 instead.
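
As an aside, a minimal sketch of how these two helpers round-trip the IEEE
double fields (mantissa bits 0..51, exponent bits 52..62, sign bit 63);
variable names are illustrative only:

    uint64_t mant = extract64(bits, 0, 52);   /* fraction field        */
    int exp       = extract64(bits, 52, 11);  /* biased exponent field */
    int sign      = extract64(bits, 63, 1);   /* sign bit              */

    uint64_t out = mant;
    out = deposit64(out, 52, 11, exp);        /* put the exponent back */
    out = deposit64(out, 63, 1, sign);        /* put the sign back     */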
1
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/hexagon/fma_emu.c | 46 ++++++++++++++--------------------------
8
1 file changed, 16 insertions(+), 30 deletions(-)
9
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/hexagon/fma_emu.c
13
+++ b/target/hexagon/fma_emu.c
14
@@ -XXX,XX +XXX,XX @@
15
16
#define WAY_BIG_EXP 4096
17
18
-typedef union {
19
- double f;
20
- uint64_t i;
21
- struct {
22
- uint64_t mant:52;
23
- uint64_t exp:11;
24
- uint64_t sign:1;
25
- };
26
-} Double;
27
-
28
static uint64_t float64_getmant(float64 f64)
29
{
30
- Double a = { .i = f64 };
31
+ uint64_t mant = extract64(f64, 0, 52);
32
if (float64_is_normal(f64)) {
33
- return a.mant | 1ULL << 52;
34
+ return mant | 1ULL << 52;
35
}
36
if (float64_is_zero(f64)) {
37
return 0;
38
}
39
if (float64_is_denormal(f64)) {
40
- return a.mant;
41
+ return mant;
42
}
43
return ~0ULL;
44
}
45
46
int32_t float64_getexp(float64 f64)
47
{
48
- Double a = { .i = f64 };
49
+ int exp = extract64(f64, 52, 11);
50
if (float64_is_normal(f64)) {
51
- return a.exp;
52
+ return exp;
53
}
54
if (float64_is_denormal(f64)) {
55
- return a.exp + 1;
56
+ return exp + 1;
57
}
58
return -1;
59
}
60
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
61
/* Return a maximum finite value with the requested sign */
62
static float64 accum_round_float64(Accum a, float_status *fp_status)
63
{
64
+ uint64_t ret;
65
+
66
if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
67
&& ((a.guard | a.round | a.sticky) == 0)) {
68
/* result zero */
69
@@ -XXX,XX +XXX,XX @@ static float64 accum_round_float64(Accum a, float_status *fp_status)
70
}
71
}
72
/* Underflow? */
73
- if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
74
+ ret = int128_getlo(a.mant);
75
+ if (ret & (1ULL << DF_MANTBITS)) {
76
/* Leading one means: No, we're normal. So, we should be done... */
77
- Double ret;
78
- ret.i = 0;
79
- ret.sign = a.sign;
80
- ret.exp = a.exp;
81
- ret.mant = int128_getlo(a.mant);
82
- return ret.i;
83
+ ret = deposit64(ret, 52, 11, a.exp);
84
+ } else {
85
+ assert(a.exp == 1);
86
+ ret = deposit64(ret, 52, 11, 0);
87
}
88
- assert(a.exp == 1);
89
- Double ret;
90
- ret.i = 0;
91
- ret.sign = a.sign;
92
- ret.exp = 0;
93
- ret.mant = int128_getlo(a.mant);
94
- return ret.i;
95
+ ret = deposit64(ret, 63, 1, a.sign);
96
+ return ret;
97
}
98
99
float64 internal_mpyhh(float64 a, float64 b,
100
--
101
2.43.0
New patch
1
No need to open-code 64x64->128-bit multiplication.
1
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/hexagon/fma_emu.c | 32 +++-----------------------------
7
1 file changed, 3 insertions(+), 29 deletions(-)
8
9
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/hexagon/fma_emu.c
12
+++ b/target/hexagon/fma_emu.c
13
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32)
14
return -1;
15
}
16
17
-static uint32_t int128_getw0(Int128 x)
18
-{
19
- return int128_getlo(x);
20
-}
21
-
22
-static uint32_t int128_getw1(Int128 x)
23
-{
24
- return int128_getlo(x) >> 32;
25
-}
26
-
27
static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
28
{
29
- Int128 a, b;
30
- uint64_t pp0, pp1a, pp1b, pp1s, pp2;
31
+ uint64_t l, h;
32
33
- a = int128_make64(ai);
34
- b = int128_make64(bi);
35
- pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
36
- pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
37
- pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
38
- pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
39
-
40
- pp1s = pp1a + pp1b;
41
- if ((pp1s < pp1a) || (pp1s < pp1b)) {
42
- pp2 += (1ULL << 32);
43
- }
44
- uint64_t ret_low = pp0 + (pp1s << 32);
45
- if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
46
- pp2 += 1;
47
- }
48
-
49
- return int128_make128(ret_low, pp2 + (pp1s >> 32));
50
+ mulu64(&l, &h, ai, bi);
51
+ return int128_make128(l, h);
52
}
53
54
static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
55
--
56
2.43.0
1
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
1
Initialize x.mant from accumulated via direct assignment,
2
rather than by multiplying by 1.
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
plugins/core.c | 2 +-
7
target/hexagon/fma_emu.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
8
1 file changed, 1 insertion(+), 1 deletion(-)
6
9
7
diff --git a/plugins/core.c b/plugins/core.c
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/plugins/core.c
12
--- a/target/hexagon/fma_emu.c
10
+++ b/plugins/core.c
13
+++ b/target/hexagon/fma_emu.c
11
@@ -XXX,XX +XXX,XX @@ static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
14
@@ -XXX,XX +XXX,XX @@ float64 internal_mpyhh(float64 a, float64 b,
12
GArray *cbs = *arr;
15
float64_is_infinity(b)) {
13
16
return float64_mul(a, b, fp_status);
14
if (!cbs) {
15
- cbs = g_array_sized_new(false, false,
16
+ cbs = g_array_sized_new(false, true,
17
sizeof(struct qemu_plugin_dyn_cb), 1);
18
*arr = cbs;
19
}
17
}
18
- x.mant = int128_mul_6464(accumulated, 1);
19
+ x.mant = int128_make64(accumulated);
20
x.sticky = sticky;
21
prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
22
x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
20
--
23
--
21
2.34.1
24
2.43.0
22
23
1
For normal helpers, read the function pointer from the
1
Convert all targets simultaneously, as the gen_intermediate_code
2
structure earlier. For plugins, this will allow the
2
function disappears from the target. While there are possible
3
function pointer to come from elsewhere.
3
workarounds, they're larger than simply performing the conversion.
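
The per-target change is mechanical; a condensed sketch for a hypothetical
target "foo" (the foo_* names are illustrative, the real conversions are in
the hunks below):

    /* cpu.h: declare the renamed entry point. */
    void foo_translate_code(CPUState *cs, TranslationBlock *tb,
                            int *max_insns, vaddr pc, void *host_pc);

    /* cpu.c: install it as the new TCGCPUOps hook. */
    static const TCGCPUOps foo_tcg_ops = {
        .initialize     = foo_translate_init,
        .translate_code = foo_translate_code,
        /* other hooks unchanged */
    };

    /* translate.c: the body is the old gen_intermediate_code, renamed. */
    void foo_translate_code(CPUState *cs, TranslationBlock *tb,
                            int *max_insns, vaddr pc, void *host_pc)
    {
        DisasContext dc = { };
        translator_loop(cs, tb, max_insns, pc, host_pc, &foo_tr_ops, &dc.base);
    }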
4
4
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
7
---
9
include/tcg/tcg.h | 21 +++++++++-------
8
include/exec/translator.h | 14 --------------
10
include/exec/helper-gen.h.inc | 24 ++++++++++++-------
9
include/hw/core/tcg-cpu-ops.h | 13 +++++++++++++
11
tcg/tcg.c | 45 +++++++++++++++++++----------------
10
target/alpha/cpu.h | 2 ++
12
3 files changed, 52 insertions(+), 38 deletions(-)
11
target/arm/internals.h | 2 ++
12
target/avr/cpu.h | 2 ++
13
target/hexagon/cpu.h | 2 ++
14
target/hppa/cpu.h | 2 ++
15
target/i386/tcg/helper-tcg.h | 2 ++
16
target/loongarch/internals.h | 2 ++
17
target/m68k/cpu.h | 2 ++
18
target/microblaze/cpu.h | 2 ++
19
target/mips/tcg/tcg-internal.h | 2 ++
20
target/openrisc/cpu.h | 2 ++
21
target/ppc/cpu.h | 2 ++
22
target/riscv/cpu.h | 3 +++
23
target/rx/cpu.h | 2 ++
24
target/s390x/s390x-internal.h | 2 ++
25
target/sh4/cpu.h | 2 ++
26
target/sparc/cpu.h | 2 ++
27
target/tricore/cpu.h | 2 ++
28
target/xtensa/cpu.h | 2 ++
29
accel/tcg/cpu-exec.c | 8 +++++---
30
accel/tcg/translate-all.c | 8 +++++---
31
target/alpha/cpu.c | 1 +
32
target/alpha/translate.c | 4 ++--
33
target/arm/cpu.c | 1 +
34
target/arm/tcg/cpu-v7m.c | 1 +
35
target/arm/tcg/translate.c | 5 ++---
36
target/avr/cpu.c | 1 +
37
target/avr/translate.c | 6 +++---
38
target/hexagon/cpu.c | 1 +
39
target/hexagon/translate.c | 4 ++--
40
target/hppa/cpu.c | 1 +
41
target/hppa/translate.c | 4 ++--
42
target/i386/tcg/tcg-cpu.c | 1 +
43
target/i386/tcg/translate.c | 5 ++---
44
target/loongarch/cpu.c | 1 +
45
target/loongarch/tcg/translate.c | 4 ++--
46
target/m68k/cpu.c | 1 +
47
target/m68k/translate.c | 4 ++--
48
target/microblaze/cpu.c | 1 +
49
target/microblaze/translate.c | 4 ++--
50
target/mips/cpu.c | 1 +
51
target/mips/tcg/translate.c | 4 ++--
52
target/openrisc/cpu.c | 1 +
53
target/openrisc/translate.c | 4 ++--
54
target/ppc/cpu_init.c | 1 +
55
target/ppc/translate.c | 4 ++--
56
target/riscv/tcg/tcg-cpu.c | 1 +
57
target/riscv/translate.c | 4 ++--
58
target/rx/cpu.c | 1 +
59
target/rx/translate.c | 4 ++--
60
target/s390x/cpu.c | 1 +
61
target/s390x/tcg/translate.c | 4 ++--
62
target/sh4/cpu.c | 1 +
63
target/sh4/translate.c | 4 ++--
64
target/sparc/cpu.c | 1 +
65
target/sparc/translate.c | 4 ++--
66
target/tricore/cpu.c | 1 +
67
target/tricore/translate.c | 5 ++---
68
target/xtensa/cpu.c | 1 +
69
target/xtensa/translate.c | 4 ++--
70
62 files changed, 121 insertions(+), 62 deletions(-)
13
71
14
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
72
diff --git a/include/exec/translator.h b/include/exec/translator.h
15
index XXXXXXX..XXXXXXX 100644
73
index XXXXXXX..XXXXXXX 100644
16
--- a/include/tcg/tcg.h
74
--- a/include/exec/translator.h
17
+++ b/include/tcg/tcg.h
75
+++ b/include/exec/translator.h
18
@@ -XXX,XX +XXX,XX @@ typedef struct TCGTargetOpDef {
19
20
bool tcg_op_supported(TCGOpcode op);
21
22
-void tcg_gen_call0(TCGHelperInfo *, TCGTemp *ret);
23
-void tcg_gen_call1(TCGHelperInfo *, TCGTemp *ret, TCGTemp *);
24
-void tcg_gen_call2(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *);
25
-void tcg_gen_call3(TCGHelperInfo *, TCGTemp *ret, TCGTemp *,
26
+void tcg_gen_call0(void *func, TCGHelperInfo *, TCGTemp *ret);
27
+void tcg_gen_call1(void *func, TCGHelperInfo *, TCGTemp *ret, TCGTemp *);
28
+void tcg_gen_call2(void *func, TCGHelperInfo *, TCGTemp *ret,
29
TCGTemp *, TCGTemp *);
30
-void tcg_gen_call4(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
31
- TCGTemp *, TCGTemp *);
32
-void tcg_gen_call5(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
33
+void tcg_gen_call3(void *func, TCGHelperInfo *, TCGTemp *ret,
34
TCGTemp *, TCGTemp *, TCGTemp *);
35
-void tcg_gen_call6(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
36
+void tcg_gen_call4(void *func, TCGHelperInfo *, TCGTemp *ret,
37
TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *);
38
-void tcg_gen_call7(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
39
+void tcg_gen_call5(void *func, TCGHelperInfo *, TCGTemp *ret,
40
TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *);
41
+void tcg_gen_call6(void *func, TCGHelperInfo *, TCGTemp *ret,
42
+ TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *,
43
+ TCGTemp *, TCGTemp *);
44
+void tcg_gen_call7(void *func, TCGHelperInfo *, TCGTemp *ret,
45
+ TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *,
46
+ TCGTemp *, TCGTemp *, TCGTemp *);
47
48
TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs);
49
void tcg_op_remove(TCGContext *s, TCGOp *op);
50
diff --git a/include/exec/helper-gen.h.inc b/include/exec/helper-gen.h.inc
51
index XXXXXXX..XXXXXXX 100644
52
--- a/include/exec/helper-gen.h.inc
53
+++ b/include/exec/helper-gen.h.inc
54
@@ -XXX,XX +XXX,XX @@
76
@@ -XXX,XX +XXX,XX @@
55
extern TCGHelperInfo glue(helper_info_, name); \
77
#include "qemu/bswap.h"
56
static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
78
#include "exec/vaddr.h"
57
{ \
79
58
- tcg_gen_call0(&glue(helper_info_, name), dh_retvar(ret)); \
80
-/**
59
+ tcg_gen_call0(glue(helper_info_,name).func, \
81
- * gen_intermediate_code
60
+ &glue(helper_info_,name), dh_retvar(ret)); \
82
- * @cpu: cpu context
83
- * @tb: translation block
84
- * @max_insns: max number of instructions to translate
85
- * @pc: guest virtual program counter address
86
- * @host_pc: host physical program counter address
87
- *
88
- * This function must be provided by the target, which should create
89
- * the target-specific DisasContext, and then invoke translator_loop.
90
- */
91
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
92
- vaddr pc, void *host_pc);
93
-
94
/**
95
* DisasJumpType:
96
* @DISAS_NEXT: Next instruction in program order.
97
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
98
index XXXXXXX..XXXXXXX 100644
99
--- a/include/hw/core/tcg-cpu-ops.h
100
+++ b/include/hw/core/tcg-cpu-ops.h
101
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
102
* Called when the first CPU is realized.
103
*/
104
void (*initialize)(void);
105
+ /**
106
+ * @translate_code: Translate guest instructions to TCGOps
107
+ * @cpu: cpu context
108
+ * @tb: translation block
109
+ * @max_insns: max number of instructions to translate
110
+ * @pc: guest virtual program counter address
111
+ * @host_pc: host physical program counter address
112
+ *
113
+ * This function must be provided by the target, which should create
114
+ * the target-specific DisasContext, and then invoke translator_loop.
115
+ */
116
+ void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
117
+ int *max_insns, vaddr pc, void *host_pc);
118
/**
119
* @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
120
*
121
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
122
index XXXXXXX..XXXXXXX 100644
123
--- a/target/alpha/cpu.h
124
+++ b/target/alpha/cpu.h
125
@@ -XXX,XX +XXX,XX @@ enum {
126
};
127
128
void alpha_translate_init(void);
129
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
130
+ int *max_insns, vaddr pc, void *host_pc);
131
132
#define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
133
134
diff --git a/target/arm/internals.h b/target/arm/internals.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/target/arm/internals.h
137
+++ b/target/arm/internals.h
138
@@ -XXX,XX +XXX,XX @@ void init_cpreg_list(ARMCPU *cpu);
139
140
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
141
void arm_translate_init(void);
142
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
143
+ int *max_insns, vaddr pc, void *host_pc);
144
145
void arm_cpu_register_gdb_commands(ARMCPU *cpu);
146
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
147
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
148
index XXXXXXX..XXXXXXX 100644
149
--- a/target/avr/cpu.h
150
+++ b/target/avr/cpu.h
151
@@ -XXX,XX +XXX,XX @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
61
}
152
}
62
153
63
#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
154
void avr_cpu_tcg_init(void);
64
@@ -XXX,XX +XXX,XX @@ extern TCGHelperInfo glue(helper_info_, name); \
155
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
65
static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
156
+ int *max_insns, vaddr pc, void *host_pc);
66
dh_arg_decl(t1, 1)) \
157
67
{ \
158
int cpu_avr_exec(CPUState *cpu);
68
- tcg_gen_call1(&glue(helper_info_, name), dh_retvar(ret), \
159
69
+ tcg_gen_call1(glue(helper_info_,name).func, \
160
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
70
+ &glue(helper_info_,name), dh_retvar(ret), \
161
index XXXXXXX..XXXXXXX 100644
71
dh_arg(t1, 1)); \
162
--- a/target/hexagon/cpu.h
163
+++ b/target/hexagon/cpu.h
164
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
165
typedef HexagonCPU ArchCPU;
166
167
void hexagon_translate_init(void);
168
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
169
+ int *max_insns, vaddr pc, void *host_pc);
170
171
#include "exec/cpu-all.h"
172
173
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/hppa/cpu.h
176
+++ b/target/hppa/cpu.h
177
@@ -XXX,XX +XXX,XX @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
72
}
178
}
73
179
74
@@ -XXX,XX +XXX,XX @@ extern TCGHelperInfo glue(helper_info_, name); \
180
void hppa_translate_init(void);
75
static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
181
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
76
dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \
182
+ int *max_insns, vaddr pc, void *host_pc);
77
{ \
183
78
- tcg_gen_call2(&glue(helper_info_, name), dh_retvar(ret), \
184
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
79
+ tcg_gen_call2(glue(helper_info_,name).func, \
185
80
+ &glue(helper_info_,name), dh_retvar(ret), \
186
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
81
dh_arg(t1, 1), dh_arg(t2, 2)); \
187
index XXXXXXX..XXXXXXX 100644
188
--- a/target/i386/tcg/helper-tcg.h
189
+++ b/target/i386/tcg/helper-tcg.h
190
@@ -XXX,XX +XXX,XX @@ static inline target_long lshift(target_long x, int n)
191
192
/* translate.c */
193
void tcg_x86_init(void);
194
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
195
+ int *max_insns, vaddr pc, void *host_pc);
196
197
/* excp_helper.c */
198
G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
199
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
200
index XXXXXXX..XXXXXXX 100644
201
--- a/target/loongarch/internals.h
202
+++ b/target/loongarch/internals.h
203
@@ -XXX,XX +XXX,XX @@
204
#define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)
205
206
void loongarch_translate_init(void);
207
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
208
+ int *max_insns, vaddr pc, void *host_pc);
209
210
void G_NORETURN do_raise_exception(CPULoongArchState *env,
211
uint32_t exception,
212
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
213
index XXXXXXX..XXXXXXX 100644
214
--- a/target/m68k/cpu.h
215
+++ b/target/m68k/cpu.h
216
@@ -XXX,XX +XXX,XX @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
217
int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
218
219
void m68k_tcg_init(void);
220
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
221
+ int *max_insns, vaddr pc, void *host_pc);
222
void m68k_cpu_init_gdb(M68kCPU *cpu);
223
uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
224
void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
225
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
226
index XXXXXXX..XXXXXXX 100644
227
--- a/target/microblaze/cpu.h
228
+++ b/target/microblaze/cpu.h
229
@@ -XXX,XX +XXX,XX @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
82
}
230
}
83
231
84
@@ -XXX,XX +XXX,XX @@ extern TCGHelperInfo glue(helper_info_, name); \
232
void mb_tcg_init(void);
85
static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
233
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
86
dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
234
+ int *max_insns, vaddr pc, void *host_pc);
87
{ \
235
88
- tcg_gen_call3(&glue(helper_info_, name), dh_retvar(ret), \
236
#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU
89
+ tcg_gen_call3(glue(helper_info_,name).func, \
237
90
+ &glue(helper_info_,name), dh_retvar(ret), \
238
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
91
dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3)); \
239
index XXXXXXX..XXXXXXX 100644
92
}
240
--- a/target/mips/tcg/tcg-internal.h
93
241
+++ b/target/mips/tcg/tcg-internal.h
94
@@ -XXX,XX +XXX,XX @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
242
@@ -XXX,XX +XXX,XX @@
95
dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \
243
#include "cpu.h"
96
dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
244
97
{ \
245
void mips_tcg_init(void);
98
- tcg_gen_call4(&glue(helper_info_, name), dh_retvar(ret), \
246
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
99
+ tcg_gen_call4(glue(helper_info_,name).func, \
247
+ int *max_insns, vaddr pc, void *host_pc);
100
+ &glue(helper_info_,name), dh_retvar(ret), \
248
101
dh_arg(t1, 1), dh_arg(t2, 2), \
249
void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
102
dh_arg(t3, 3), dh_arg(t4, 4)); \
250
G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
103
}
251
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
104
@@ -XXX,XX +XXX,XX @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
252
index XXXXXXX..XXXXXXX 100644
105
dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
253
--- a/target/openrisc/cpu.h
106
dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
254
+++ b/target/openrisc/cpu.h
107
{ \
255
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
108
- tcg_gen_call5(&glue(helper_info_, name), dh_retvar(ret), \
256
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
109
+ tcg_gen_call5(glue(helper_info_,name).func, \
257
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
110
+ &glue(helper_info_,name), dh_retvar(ret), \
258
void openrisc_translate_init(void);
111
dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
259
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
112
dh_arg(t4, 4), dh_arg(t5, 5)); \
260
+ int *max_insns, vaddr pc, void *host_pc);
113
}
261
int print_insn_or1k(bfd_vma addr, disassemble_info *info);
114
@@ -XXX,XX +XXX,XX @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
262
115
dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
263
#ifndef CONFIG_USER_ONLY
116
dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \
264
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
117
{ \
265
index XXXXXXX..XXXXXXX 100644
118
- tcg_gen_call6(&glue(helper_info_, name), dh_retvar(ret), \
266
--- a/target/ppc/cpu.h
119
+ tcg_gen_call6(glue(helper_info_,name).func, \
267
+++ b/target/ppc/cpu.h
120
+ &glue(helper_info_,name), dh_retvar(ret), \
268
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ppc_cpu;
121
dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
269
122
dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6)); \
270
/*****************************************************************************/
123
}
271
void ppc_translate_init(void);
124
@@ -XXX,XX +XXX,XX @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
272
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
125
dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6), \
273
+ int *max_insns, vaddr pc, void *host_pc);
126
dh_arg_decl(t7, 7)) \
274
127
{ \
275
#if !defined(CONFIG_USER_ONLY)
128
- tcg_gen_call7(&glue(helper_info_, name), dh_retvar(ret), \
276
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
129
+ tcg_gen_call7(glue(helper_info_,name).func, \
277
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
130
+ &glue(helper_info_,name), dh_retvar(ret), \
278
index XXXXXXX..XXXXXXX 100644
131
dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
279
--- a/target/riscv/cpu.h
132
dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \
280
+++ b/target/riscv/cpu.h
133
dh_arg(t7, 7)); \
281
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
134
diff --git a/tcg/tcg.c b/tcg/tcg.c
282
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
135
index XXXXXXX..XXXXXXX 100644
283
136
--- a/tcg/tcg.c
284
void riscv_translate_init(void);
137
+++ b/tcg/tcg.c
285
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
138
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op)
286
+ int *max_insns, vaddr pc, void *host_pc);
139
287
+
140
static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);
288
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
141
289
uint32_t exception, uintptr_t pc);
142
-static void tcg_gen_callN(TCGHelperInfo *info, TCGTemp *ret, TCGTemp **args)
290
143
+static void tcg_gen_callN(void *func, TCGHelperInfo *info,
291
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
144
+ TCGTemp *ret, TCGTemp **args)
292
index XXXXXXX..XXXXXXX 100644
145
{
293
--- a/target/rx/cpu.h
146
TCGv_i64 extend_free[MAX_CALL_IARGS];
294
+++ b/target/rx/cpu.h
147
int n_extend = 0;
295
@@ -XXX,XX +XXX,XX @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
148
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_callN(TCGHelperInfo *info, TCGTemp *ret, TCGTemp **args)
296
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
149
g_assert_not_reached();
297
150
}
298
void rx_translate_init(void);
299
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
300
+ int *max_insns, vaddr pc, void *host_pc);
301
void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
302
303
#include "exec/cpu-all.h"
304
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
305
index XXXXXXX..XXXXXXX 100644
306
--- a/target/s390x/s390x-internal.h
307
+++ b/target/s390x/s390x-internal.h
308
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
309
310
/* translate.c */
311
void s390x_translate_init(void);
312
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
313
+ int *max_insns, vaddr pc, void *host_pc);
314
void s390x_restore_state_to_opc(CPUState *cs,
315
const TranslationBlock *tb,
316
const uint64_t *data);
317
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
318
index XXXXXXX..XXXXXXX 100644
319
--- a/target/sh4/cpu.h
320
+++ b/target/sh4/cpu.h
321
@@ -XXX,XX +XXX,XX @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
322
uintptr_t retaddr);
323
324
void sh4_translate_init(void);
325
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
326
+ int *max_insns, vaddr pc, void *host_pc);
327
328
#if !defined(CONFIG_USER_ONLY)
329
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
330
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
331
index XXXXXXX..XXXXXXX 100644
332
--- a/target/sparc/cpu.h
333
+++ b/target/sparc/cpu.h
334
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
335
336
/* translate.c */
337
void sparc_tcg_init(void);
338
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
339
+ int *max_insns, vaddr pc, void *host_pc);
340
341
/* fop_helper.c */
342
target_ulong cpu_get_fsr(CPUSPARCState *);
343
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
344
index XXXXXXX..XXXXXXX 100644
345
--- a/target/tricore/cpu.h
346
+++ b/target/tricore/cpu.h
347
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)
348
349
void cpu_state_reset(CPUTriCoreState *s);
350
void tricore_tcg_init(void);
351
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
352
+ int *max_insns, vaddr pc, void *host_pc);
353
354
static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
355
uint64_t *cs_base, uint32_t *flags)
356
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
357
index XXXXXXX..XXXXXXX 100644
358
--- a/target/xtensa/cpu.h
359
+++ b/target/xtensa/cpu.h
360
@@ -XXX,XX +XXX,XX @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
361
362
void xtensa_collect_sr_names(const XtensaConfig *config);
363
void xtensa_translate_init(void);
364
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
365
+ int *max_insns, vaddr pc, void *host_pc);
366
void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
367
void xtensa_breakpoint_handler(CPUState *cs);
368
void xtensa_register_core(XtensaConfigList *node);
369
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
370
index XXXXXXX..XXXXXXX 100644
371
--- a/accel/tcg/cpu-exec.c
372
+++ b/accel/tcg/cpu-exec.c
373
@@ -XXX,XX +XXX,XX @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
374
375
if (!tcg_target_initialized) {
376
/* Check mandatory TCGCPUOps handlers */
377
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
378
#ifndef CONFIG_USER_ONLY
379
- assert(cpu->cc->tcg_ops->cpu_exec_halt);
380
- assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
381
+ assert(tcg_ops->cpu_exec_halt);
382
+ assert(tcg_ops->cpu_exec_interrupt);
383
#endif /* !CONFIG_USER_ONLY */
384
- cpu->cc->tcg_ops->initialize();
385
+ assert(tcg_ops->translate_code);
386
+ tcg_ops->initialize();
387
tcg_target_initialized = true;
151
}
388
}
152
- op->args[pi++] = (uintptr_t)info->func;
389
153
+ op->args[pi++] = (uintptr_t)func;
390
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
154
op->args[pi++] = (uintptr_t)info;
391
index XXXXXXX..XXXXXXX 100644
155
tcg_debug_assert(pi == total_args);
392
--- a/accel/tcg/translate-all.c
156
393
+++ b/accel/tcg/translate-all.c
157
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_callN(TCGHelperInfo *info, TCGTemp *ret, TCGTemp **args)
394
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
158
}
395
159
}
396
tcg_func_start(tcg_ctx);
160
397
161
-void tcg_gen_call0(TCGHelperInfo *info, TCGTemp *ret)
398
- tcg_ctx->cpu = env_cpu(env);
162
+void tcg_gen_call0(void *func, TCGHelperInfo *info, TCGTemp *ret)
399
- gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
163
{
400
+ CPUState *cs = env_cpu(env);
164
- tcg_gen_callN(info, ret, NULL);
401
+ tcg_ctx->cpu = cs;
165
+ tcg_gen_callN(func, info, ret, NULL);
402
+ cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);
166
}
403
+
167
404
assert(tb->size != 0);
168
-void tcg_gen_call1(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1)
405
tcg_ctx->cpu = NULL;
169
+void tcg_gen_call1(void *func, TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1)
406
*max_insns = tb->icount;
170
{
407
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
171
- tcg_gen_callN(info, ret, &t1);
408
/*
172
+ tcg_gen_callN(func, info, ret, &t1);
409
* Overflow of code_gen_buffer, or the current slice of it.
173
}
410
*
174
411
- * TODO: We don't need to re-do gen_intermediate_code, nor
175
-void tcg_gen_call2(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2)
412
+ * TODO: We don't need to re-do tcg_ops->translate_code, nor
176
+void tcg_gen_call2(void *func, TCGHelperInfo *info, TCGTemp *ret,
413
* should we re-do the tcg optimization currently hidden
177
+ TCGTemp *t1, TCGTemp *t2)
414
* inside tcg_gen_code. All that should be required is to
178
{
415
* flush the TBs, allocate a new TB, re-initialize it per
179
TCGTemp *args[2] = { t1, t2 };
416
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
180
- tcg_gen_callN(info, ret, args);
417
index XXXXXXX..XXXXXXX 100644
181
+ tcg_gen_callN(func, info, ret, args);
418
--- a/target/alpha/cpu.c
182
}
419
+++ b/target/alpha/cpu.c
183
420
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
184
-void tcg_gen_call3(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
421
185
- TCGTemp *t2, TCGTemp *t3)
422
static const TCGCPUOps alpha_tcg_ops = {
186
+void tcg_gen_call3(void *func, TCGHelperInfo *info, TCGTemp *ret,
423
.initialize = alpha_translate_init,
187
+ TCGTemp *t1, TCGTemp *t2, TCGTemp *t3)
424
+ .translate_code = alpha_translate_code,
188
{
425
.synchronize_from_tb = alpha_cpu_synchronize_from_tb,
189
TCGTemp *args[3] = { t1, t2, t3 };
426
.restore_state_to_opc = alpha_restore_state_to_opc,
190
- tcg_gen_callN(info, ret, args);
427
191
+ tcg_gen_callN(func, info, ret, args);
428
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
192
}
429
index XXXXXXX..XXXXXXX 100644
193
430
--- a/target/alpha/translate.c
194
-void tcg_gen_call4(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
431
+++ b/target/alpha/translate.c
195
- TCGTemp *t2, TCGTemp *t3, TCGTemp *t4)
432
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
196
+void tcg_gen_call4(void *func, TCGHelperInfo *info, TCGTemp *ret,
433
.tb_stop = alpha_tr_tb_stop,
197
+ TCGTemp *t1, TCGTemp *t2, TCGTemp *t3, TCGTemp *t4)
434
};
198
{
435
199
TCGTemp *args[4] = { t1, t2, t3, t4 };
436
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
200
- tcg_gen_callN(info, ret, args);
437
- vaddr pc, void *host_pc)
201
+ tcg_gen_callN(func, info, ret, args);
438
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
202
}
439
+ int *max_insns, vaddr pc, void *host_pc)
203
440
{
204
-void tcg_gen_call5(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
441
DisasContext dc;
205
+void tcg_gen_call5(void *func, TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
442
translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
206
TCGTemp *t2, TCGTemp *t3, TCGTemp *t4, TCGTemp *t5)
443
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
207
{
444
index XXXXXXX..XXXXXXX 100644
208
TCGTemp *args[5] = { t1, t2, t3, t4, t5 };
445
--- a/target/arm/cpu.c
209
- tcg_gen_callN(info, ret, args);
446
+++ b/target/arm/cpu.c
210
+ tcg_gen_callN(func, info, ret, args);
447
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps arm_sysemu_ops = {
211
}
448
#ifdef CONFIG_TCG
212
449
static const TCGCPUOps arm_tcg_ops = {
213
-void tcg_gen_call6(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2,
450
.initialize = arm_translate_init,
214
- TCGTemp *t3, TCGTemp *t4, TCGTemp *t5, TCGTemp *t6)
451
+ .translate_code = arm_translate_code,
215
+void tcg_gen_call6(void *func, TCGHelperInfo *info, TCGTemp *ret,
452
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
216
+ TCGTemp *t1, TCGTemp *t2, TCGTemp *t3,
453
.debug_excp_handler = arm_debug_excp_handler,
217
+ TCGTemp *t4, TCGTemp *t5, TCGTemp *t6)
454
.restore_state_to_opc = arm_restore_state_to_opc,
218
{
455
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
219
TCGTemp *args[6] = { t1, t2, t3, t4, t5, t6 };
456
index XXXXXXX..XXXXXXX 100644
220
- tcg_gen_callN(info, ret, args);
457
--- a/target/arm/tcg/cpu-v7m.c
221
+ tcg_gen_callN(func, info, ret, args);
458
+++ b/target/arm/tcg/cpu-v7m.c
222
}
459
@@ -XXX,XX +XXX,XX @@ static void cortex_m55_initfn(Object *obj)
223
460
224
-void tcg_gen_call7(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
461
static const TCGCPUOps arm_v7m_tcg_ops = {
225
+void tcg_gen_call7(void *func, TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
462
.initialize = arm_translate_init,
226
TCGTemp *t2, TCGTemp *t3, TCGTemp *t4,
463
+ .translate_code = arm_translate_code,
227
TCGTemp *t5, TCGTemp *t6, TCGTemp *t7)
464
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
228
{
465
.debug_excp_handler = arm_debug_excp_handler,
229
TCGTemp *args[7] = { t1, t2, t3, t4, t5, t6, t7 };
466
.restore_state_to_opc = arm_restore_state_to_opc,
230
- tcg_gen_callN(info, ret, args);
467
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
231
+ tcg_gen_callN(func, info, ret, args);
468
index XXXXXXX..XXXXXXX 100644
232
}
469
--- a/target/arm/tcg/translate.c
233
470
+++ b/target/arm/tcg/translate.c
234
static void tcg_reg_alloc_start(TCGContext *s)
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
.tb_stop = arm_tr_tb_stop,
};

-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = { };
const TranslatorOps *ops = &arm_translator_ops;
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {

static const TCGCPUOps avr_tcg_ops = {
.initialize = avr_cpu_tcg_init,
+ .translate_code = avr_cpu_translate_code,
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
.restore_state_to_opc = avr_restore_state_to_opc,
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
*
* - translate()
* - canonicalize_skip()
- * - gen_intermediate_code()
+ * - translate_code()
* - restore_state_to_opc()
*
*/
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
.tb_stop = avr_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = { };
translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/cpu.c
+++ b/target/hexagon/cpu.c
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)

static const TCGCPUOps hexagon_tcg_ops = {
.initialize = hexagon_translate_init,
+ .translate_code = hexagon_translate_code,
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
.restore_state_to_opc = hexagon_restore_state_to_opc,
};
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
.tb_stop = hexagon_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {

static const TCGCPUOps hppa_tcg_ops = {
.initialize = hppa_translate_init,
+ .translate_code = hppa_translate_code,
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
.restore_state_to_opc = hppa_restore_state_to_opc,

diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
#endif
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx = { };
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)

static const TCGCPUOps x86_tcg_ops = {
.initialize = tcg_x86_init,
+ .translate_code = x86_translate_code,
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
.restore_state_to_opc = x86_restore_state_to_opc,
.cpu_exec_enter = x86_cpu_exec_enter,
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
.tb_stop = i386_tr_tb_stop,
};

-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)

static const TCGCPUOps loongarch_tcg_ops = {
.initialize = loongarch_translate_init,
+ .translate_code = loongarch_translate_code,
.synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
.restore_state_to_opc = loongarch_restore_state_to_opc,

diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/tcg/translate.c
+++ b/target/loongarch/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
.tb_stop = loongarch_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {

static const TCGCPUOps m68k_tcg_ops = {
.initialize = m68k_tcg_init,
+ .translate_code = m68k_translate_code,
.restore_state_to_opc = m68k_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
.tb_stop = m68k_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {

static const TCGCPUOps mb_tcg_ops = {
.initialize = mb_tcg_init,
+ .translate_code = mb_translate_code,
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
.restore_state_to_opc = mb_restore_state_to_opc,

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
.tb_stop = mb_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static const Property mips_cpu_properties[] = {
#include "hw/core/tcg-cpu-ops.h"
static const TCGCPUOps mips_tcg_ops = {
.initialize = mips_tcg_init,
+ .translate_code = mips_translate_code,
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
.restore_state_to_opc = mips_restore_state_to_opc,

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
.tb_stop = mips_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {

static const TCGCPUOps openrisc_tcg_ops = {
.initialize = openrisc_translate_init,
+ .translate_code = openrisc_translate_code,
.synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
.restore_state_to_opc = openrisc_restore_state_to_opc,

diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
.tb_stop = openrisc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {

static const TCGCPUOps ppc_tcg_ops = {
.initialize = ppc_translate_init,
+ .translate_code = ppc_translate_code,
.restore_state_to_opc = ppc_restore_state_to_opc,

#ifdef CONFIG_USER_ONLY
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
.tb_stop = ppc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_restore_state_to_opc(CPUState *cs,

static const TCGCPUOps riscv_tcg_ops = {
.initialize = riscv_translate_init,
+ .translate_code = riscv_translate_code,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
.restore_state_to_opc = riscv_restore_state_to_opc,

diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
.tb_stop = riscv_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {

static const TCGCPUOps rx_tcg_ops = {
.initialize = rx_translate_init,
+ .translate_code = rx_translate_code,
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
.restore_state_to_opc = rx_restore_state_to_opc,
.tlb_fill = rx_cpu_tlb_fill,
diff --git a/target/rx/translate.c b/target/rx/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
.tb_stop = rx_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,

static const TCGCPUOps s390_tcg_ops = {
.initialize = s390x_translate_init,
+ .translate_code = s390x_translate_code,
.restore_state_to_opc = s390x_restore_state_to_opc,

#ifdef CONFIG_USER_ONLY
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
.disas_log = s390x_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {

static const TCGCPUOps superh_tcg_ops = {
.initialize = sh4_translate_init,
+ .translate_code = sh4_translate_code,
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
.restore_state_to_opc = superh_restore_state_to_opc,

diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
.tb_stop = sh4_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {

static const TCGCPUOps sparc_tcg_ops = {
.initialize = sparc_tcg_init,
+ .translate_code = sparc_translate_code,
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
.restore_state_to_opc = sparc_restore_state_to_opc,

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
.tb_stop = sparc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = {};

diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {

static const TCGCPUOps tricore_tcg_ops = {
.initialize = tricore_tcg_init,
+ .translate_code = tricore_translate_code,
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
.restore_state_to_opc = tricore_restore_state_to_opc,
.tlb_fill = tricore_cpu_tlb_fill,
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
.tb_stop = tricore_tr_tb_stop,
};

-
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
translator_loop(cs, tb, max_insns, pc, host_pc,
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {

static const TCGCPUOps xtensa_tcg_ops = {
.initialize = xtensa_translate_init,
+ .translate_code = xtensa_translate_code,
.debug_excp_handler = xtensa_breakpoint_handler,
.restore_state_to_opc = xtensa_restore_state_to_opc,

diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
.tb_stop = xtensa_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = {};
translator_loop(cpu, tb, max_insns, pc, host_pc,
--
2.43.0
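For readers skimming the series, the shape each target takes after these patches is sketched below. This is an illustrative fragment only, not code from the series: the TCGCPUOps/TranslatorOps member names, the translate_code signature and the translator_loop() call are taken from the diffs above, while every foo_* identifier is a hypothetical placeholder and the usual QEMU-internal headers and types are assumed to be available.

/* Sketch only: "foo" is a made-up target used for illustration. */

static const TranslatorOps foo_tr_ops = {
    /* init_disas_context, translate_insn, tb_stop, ... hooks elided */
};

/* What was the global gen_intermediate_code() is now one symbol per target. */
void foo_translate_code(CPUState *cs, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext ctx = { };

    translator_loop(cs, tb, max_insns, pc, host_pc, &foo_tr_ops, &ctx.base);
}

/* The core reaches the translator through the TCGCPUOps hook rather than a
   fixed entry point. */
static const TCGCPUOps foo_tcg_ops = {
    .initialize = foo_translate_init,
    .translate_code = foo_translate_code,
    .restore_state_to_opc = foo_restore_state_to_opc,
};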