The following changes since commit aa3a285b5bc56a4208b3b57d4a55291e9c260107:

  Merge tag 'mem-2024-12-21' of https://github.com/davidhildenbrand/qemu into staging (2024-12-22 14:33:27 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241224

for you to fetch changes up to e4a8e093dc74be049f4829831dce76e5edab0003:

  accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core (2024-12-24 08:32:15 -0800)

----------------------------------------------------------------
tcg/optimize: Remove in-flight mask data from OptContext
fpu: Add float*_muladd_scalbn
fpu: Remove float_muladd_halve_result
fpu: Add float_round_nearest_even_max
fpu: Add float_muladd_suppress_add_product_zero
target/hexagon: Use float32_muladd
accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

----------------------------------------------------------------
Ilya Leoshkevich (1):
      tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

Pierrick Bouvier (1):
      plugins: optimize cpu_index code generation

Richard Henderson (70):
      tcg/optimize: Split out finish_bb, finish_ebb
      tcg/optimize: Split out fold_affected_mask
      tcg/optimize: Copy mask writeback to fold_masks
      tcg/optimize: Split out fold_masks_zs
      tcg/optimize: Augment s_mask from z_mask in fold_masks_zs
      tcg/optimize: Change representation of s_mask
      tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2
      tcg/optimize: Introduce const value accessors for TempOptInfo
      tcg/optimize: Use fold_masks_zs in fold_and
      tcg/optimize: Use fold_masks_zs in fold_andc
      tcg/optimize: Use fold_masks_zs in fold_bswap
      tcg/optimize: Use fold_masks_zs in fold_count_zeros
      tcg/optimize: Use fold_masks_z in fold_ctpop
      tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
      tcg/optimize: Compute sign mask in fold_deposit
      tcg/optimize: Use finish_folding in fold_divide
      tcg/optimize: Use finish_folding in fold_dup, fold_dup2
      tcg/optimize: Use fold_masks_s in fold_eqv
      tcg/optimize: Use fold_masks_z in fold_extract
      tcg/optimize: Use finish_folding in fold_extract2
      tcg/optimize: Use fold_masks_zs in fold_exts
      tcg/optimize: Use fold_masks_z in fold_extu
      tcg/optimize: Use fold_masks_zs in fold_movcond
      tcg/optimize: Use finish_folding in fold_mul*
      tcg/optimize: Use fold_masks_s in fold_nand
      tcg/optimize: Use fold_masks_z in fold_neg_no_const
      tcg/optimize: Use fold_masks_s in fold_nor
      tcg/optimize: Use fold_masks_s in fold_not
      tcg/optimize: Use fold_masks_zs in fold_or
      tcg/optimize: Use fold_masks_zs in fold_orc
      tcg/optimize: Use fold_masks_zs in fold_qemu_ld
      tcg/optimize: Return true from fold_qemu_st, fold_tcg_st
      tcg/optimize: Use finish_folding in fold_remainder
      tcg/optimize: Distinguish simplification in fold_setcond_zmask
      tcg/optimize: Use fold_masks_z in fold_setcond
      tcg/optimize: Use fold_masks_s in fold_negsetcond
      tcg/optimize: Use fold_masks_z in fold_setcond2
      tcg/optimize: Use finish_folding in fold_cmp_vec
      tcg/optimize: Use finish_folding in fold_cmpsel_vec
      tcg/optimize: Use fold_masks_zs in fold_sextract
      tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
      tcg/optimize: Simplify sign bit test in fold_shift
      tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec
      tcg/optimize: Use fold_masks_zs in fold_tcg_ld
      tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy
      tcg/optimize: Use fold_masks_zs in fold_xor
      tcg/optimize: Use finish_folding in fold_bitsel_vec
      tcg/optimize: Use finish_folding as default in tcg_optimize
      tcg/optimize: Remove z_mask, s_mask from OptContext
      tcg/optimize: Re-enable sign-mask optimizations
      tcg/optimize: Move fold_bitsel_vec into alphabetic sort
      tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
      softfloat: Add float{16,32,64}_muladd_scalbn
      target/arm: Use float*_muladd_scalbn
      target/sparc: Use float*_muladd_scalbn
      softfloat: Remove float_muladd_halve_result
      softfloat: Add float_round_nearest_even_max
      softfloat: Add float_muladd_suppress_add_product_zero
      target/hexagon: Use float32_mul in helper_sfmpy
      target/hexagon: Use float32_muladd for helper_sffma
      target/hexagon: Use float32_muladd for helper_sffms
      target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc
      target/hexagon: Use float32_muladd for helper_sffm[as]_lib
      target/hexagon: Remove internal_fmafx
      target/hexagon: Expand GEN_XF_ROUND
      target/hexagon: Remove Float
      target/hexagon: Remove Double
      target/hexagon: Use mulu64 for int128_mul_6464
      target/hexagon: Simplify internal_mpyhh setup
      accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

 include/exec/translator.h | 14 -
 include/fpu/softfloat-types.h | 2 +
 include/fpu/softfloat.h | 14 +-
 include/hw/core/tcg-cpu-ops.h | 13 +
 target/alpha/cpu.h | 2 +
 target/arm/internals.h | 2 +
 target/avr/cpu.h | 2 +
 target/hexagon/cpu.h | 2 +
 target/hexagon/fma_emu.h | 3 -
 target/hppa/cpu.h | 2 +
 target/i386/tcg/helper-tcg.h | 2 +
 target/loongarch/internals.h | 2 +
 target/m68k/cpu.h | 2 +
 target/microblaze/cpu.h | 2 +
 target/mips/tcg/tcg-internal.h | 2 +
 target/openrisc/cpu.h | 2 +
 target/ppc/cpu.h | 2 +
 target/riscv/cpu.h | 3 +
 target/rx/cpu.h | 2 +
 target/s390x/s390x-internal.h | 2 +
 target/sh4/cpu.h | 2 +
 target/sparc/cpu.h | 2 +
 target/sparc/helper.h | 4 +-
 target/tricore/cpu.h | 2 +
 target/xtensa/cpu.h | 2 +
 accel/tcg/cpu-exec.c | 8 +-
 accel/tcg/plugin-gen.c | 9 +
 accel/tcg/translate-all.c | 8 +-
 fpu/softfloat.c | 63 +--
 target/alpha/cpu.c | 1 +
 target/alpha/translate.c | 4 +-
 target/arm/cpu.c | 1 +
 target/arm/tcg/cpu-v7m.c | 1 +
 target/arm/tcg/helper-a64.c | 6 +-
 target/arm/tcg/translate.c | 5 +-
 target/avr/cpu.c | 1 +
 target/avr/translate.c | 6 +-
 target/hexagon/cpu.c | 1 +
 target/hexagon/fma_emu.c | 496 ++++++---------------
 target/hexagon/op_helper.c | 125 ++----
 target/hexagon/translate.c | 4 +-
 target/hppa/cpu.c | 1 +
 target/hppa/translate.c | 4 +-
 target/i386/tcg/tcg-cpu.c | 1 +
 target/i386/tcg/translate.c | 5 +-
 target/loongarch/cpu.c | 1 +
 target/loongarch/tcg/translate.c | 4 +-
 target/m68k/cpu.c | 1 +
 target/m68k/translate.c | 4 +-
 target/microblaze/cpu.c | 1 +
 target/microblaze/translate.c | 4 +-
 target/mips/cpu.c | 1 +
 target/mips/tcg/translate.c | 4 +-
 target/openrisc/cpu.c | 1 +
 target/openrisc/translate.c | 4 +-
 target/ppc/cpu_init.c | 1 +
 target/ppc/translate.c | 4 +-
 target/riscv/tcg/tcg-cpu.c | 1 +
 target/riscv/translate.c | 4 +-
 target/rx/cpu.c | 1 +
 target/rx/translate.c | 4 +-
 target/s390x/cpu.c | 1 +
 target/s390x/tcg/translate.c | 4 +-
 target/sh4/cpu.c | 1 +
 target/sh4/translate.c | 4 +-
 target/sparc/cpu.c | 1 +
 target/sparc/fop_helper.c | 8 +-
 target/sparc/translate.c | 84 ++--
 target/tricore/cpu.c | 1 +
 target/tricore/translate.c | 5 +-
 target/xtensa/cpu.c | 1 +
 target/xtensa/translate.c | 4 +-
 tcg/optimize.c | 857 +++++++++++++++++++-----------------
 tests/tcg/multiarch/system/memory.c | 9 +-
 fpu/softfloat-parts.c.inc | 16 +-
 75 files changed, 866 insertions(+), 1009 deletions(-)

From: Ilya Leoshkevich <iii@linux.ibm.com>

make check-tcg fails on Fedora with the following error message:

alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
   17 | #include <inttypes.h>
      |          ^~~~~~~~~~~~
compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tests/tcg/multiarch/system/memory.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -XXX,XX +XXX,XX @@

 #include <stdint.h>
 #include <stdbool.h>
-#include <inttypes.h>
 #include <minilib.h>

 #ifndef CHECK_UNALIGNED
@@ -XXX,XX +XXX,XX @@ int main(void)
     int i;
     bool ok = true;

-    ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
-    ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+    ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+    ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);

     /* Run through the unsigned tests first */
     for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -XXX,XX +XXX,XX @@ int main(void)
         ok = do_signed_reads(true);
     }

-    ml_printf("Test data read: %"PRId32"\n", test_read_count);
-    ml_printf("Test data write: %"PRId32"\n", test_write_count);
+    ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+    ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
     ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
     return ok ? 0 : -1;
 }
--
2.43.0

From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
 accel/tcg/plugin-gen.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void gen_disable_mem_helper(void)

 static TCGv_i32 gen_cpu_index(void)
 {
+    /*
+     * Optimize when we run with a single vcpu. All values using cpu_index,
+     * including scoreboard index, will be optimized out.
+     * User-mode calls tb_flush when setting this flag. In system-mode, all
+     * vcpus are created before generating code.
+     */
+    if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+        return tcg_constant_i32(current_cpu->cpu_index);
+    }
     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
     tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
--
2.43.0

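The win here comes entirely from constant folding. As a rough standalone illustration (not part of the patch; SLOT_SIZE and scoreboard_offset are made-up names), once cpu_index is known at code-generation time, every quantity derived from it, such as a per-vcpu scoreboard offset, is a constant as well, so no load is needed at each use:

    #include <stdio.h>

    #define SLOT_SIZE 64   /* hypothetical per-vcpu scoreboard slot size */

    /* Constant index in, constant offset out: the computation folds away.
     * With an index loaded from memory it must be redone at run time. */
    static long scoreboard_offset(int cpu_index)
    {
        return (long)cpu_index * SLOT_SIZE;
    }

    int main(void)
    {
        printf("single-vcpu offset: %ld\n", scoreboard_offset(0));
        return 0;
    }
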
Call them directly from the opcode switch statement in tcg_optimize,
rather than in finish_folding based on opcode flags. Adjust folding
of conditional branches to match.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 47 +++++++++++++++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
     }
 }

+static void finish_bb(OptContext *ctx)
+{
+    /* We only optimize memory barriers across basic blocks. */
+    ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+    finish_bb(ctx);
+    /* We only optimize across extended basic blocks. */
+    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+    remove_mem_copy_all(ctx);
+}
+
 static void finish_folding(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     int i, nb_oargs;

-    /*
-     * We only optimize extended basic blocks.  If the opcode ends a BB
-     * and is not a conditional branch, reset all temp data.
-     */
-    if (def->flags & TCG_OPF_BB_END) {
-        ctx->prev_mb = NULL;
-        if (!(def->flags & TCG_OPF_COND_BRANCH)) {
-            memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
-            remove_mem_copy_all(ctx);
-        }
-        return;
-    }
-
     nb_oargs = def->nb_oargs;
     for (i = 0; i < nb_oargs; i++) {
         TCGTemp *ts = arg_temp(op->args[i]);
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
     if (i > 0) {
         op->opc = INDEX_op_br;
         op->args[0] = op->args[3];
+        finish_ebb(ctx);
+    } else {
+        finish_bb(ctx);
     }
-    return false;
+    return true;
 }

 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
         }
         op->opc = INDEX_op_br;
         op->args[0] = label;
-        break;
+        finish_ebb(ctx);
+        return true;
     }
-    return false;
+
+    finish_bb(ctx);
+    return true;
 }

 static bool fold_bswap(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(xor):
             done = fold_xor(&ctx, op);
             break;
+        case INDEX_op_set_label:
+        case INDEX_op_br:
+        case INDEX_op_exit_tb:
+        case INDEX_op_goto_tb:
+        case INDEX_op_goto_ptr:
+            finish_ebb(&ctx);
+            done = true;
+            break;
         default:
             break;
         }
--
2.43.0

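A compact model of the state-reset rule those two helpers encode (an illustrative sketch with simplified types, not QEMU code): memory-barrier tracking is discarded at every basic-block boundary, while per-temp data survives until the end of the extended basic block.

    #include <stdbool.h>
    #include <string.h>

    struct toy_ctx {
        void *prev_mb;          /* last memory barrier seen, if any */
        bool  temps_used[16];   /* stand-in for per-temp optimizer state */
    };

    static void finish_bb(struct toy_ctx *ctx)
    {
        ctx->prev_mb = NULL;                 /* barriers do not cross a BB */
    }

    static void finish_ebb(struct toy_ctx *ctx)
    {
        finish_bb(ctx);
        memset(ctx->temps_used, 0, sizeof(ctx->temps_used));  /* forget temps */
    }

A conditional branch that the optimizer proves always taken becomes an unconditional branch, so it ends the extended basic block and takes the finish_ebb() path; otherwise only finish_bb() runs.
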
There are only a few logical operations which can compute
an "affected" mask. Split out handling of this optimization
to a separate function, only to be called when applicable.

Remove the a_mask field from OptContext, as the mask is
no longer stored anywhere.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

     /* In flight values from optimization. */
-    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
     uint64_t s_mask;  /* mask of clrsb(value) bits */
     TCGType type;
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)

 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
-    uint64_t a_mask = ctx->a_mask;
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;

@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
      * type changing opcodes.
      */
     if (ctx->type == TCG_TYPE_I32) {
-        a_mask = (int32_t)a_mask;
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
         ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
+    return false;
+}
+
+/*
+ * An "affected" mask bit is 0 if and only if the result is identical
+ * to the first input.  Thus if the entire mask is 0, the operation
+ * is equivalent to a copy.
+ */
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
+{
+    if (ctx->type == TCG_TYPE_I32) {
+        a_mask = (uint32_t)a_mask;
+    }
     if (a_mask == 0) {
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
      */
-    if (arg_is_const(op->args[2])) {
-        ctx->a_mask = z1 & ~z2;
+    if (arg_is_const(op->args[2]) &&
+        fold_affected_mask(ctx, op, z1 & ~z2)) {
+        return true;
     }

     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
      */
     if (arg_is_const(op->args[2])) {
         uint64_t z2 = ~arg_info(op->args[2])->z_mask;
-        ctx->a_mask = z1 & ~z2;
+        if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+            return true;
+        }
         z1 &= z2;
     }
     ctx->z_mask = z1;
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)

     z_mask_old = arg_info(op->args[1])->z_mask;
     z_mask = extract64(z_mask_old, pos, len);
-    if (pos == 0) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)

     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }

     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)

     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
-    if (!type_change) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;

-    if (pos == 0) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }

     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         }

         /* Assume all bits affected, no bits known zero, no sign reps. */
-        ctx.a_mask = -1;
         ctx.z_mask = -1;
         ctx.s_mask = 0;

--
2.43.0

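A worked example of the "affected" mask for AND with a constant (illustration only, not part of the patch): a result bit can differ from arg1 only where arg1 may be nonzero (its z_mask) and the constant clears that bit; if the intersection is empty, the operation is a plain copy.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t z1 = 0x00ff;       /* arg1 has at most these bits set */
        uint64_t c  = 0xffff;       /* constant second operand */
        uint64_t a_mask = z1 & ~c;  /* bits where the result could differ */

        assert(a_mask == 0);        /* so arg1 & 0xffff == arg1: fold to a copy */
        return 0;
    }
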
Use of fold_masks should be restricted to those opcodes that
can reliably make use of it -- those with a single output,
and from higher-level folders that set up the masks.
Prepare for conversion of each folder in turn.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
+    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    TCGTemp *ts;
+    TempOptInfo *ti;
+
+    /* Only single-output opcodes are supported here. */
+    tcg_debug_assert(def->nb_oargs == 1);

     /*
      * 32-bit ops generate 32-bit results, which for the purpose of
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
-        ctx->z_mask = z_mask;
-        ctx->s_mask = s_mask;
     }

     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
-    return false;
+
+    ts = arg_temp(op->args[0]);
+    reset_ts(ctx, ts);
+
+    ti = ts_info(ts);
+    ti->z_mask = z_mask;
+    ti->s_mask = s_mask;
+    return true;
 }

 /*
--
2.43.0

Add a routine to which masks can be passed directly, rather than
storing them into OptContext.  To be used in upcoming patches.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }

-static bool fold_masks(OptContext *ctx, TCGOp *op)
+/*
+ * Record "zero" and "sign" masks for the single output of @op.
+ * See TempOptInfo definition of z_mask and s_mask.
+ * If z_mask allows, fold the output to constant zero.
+ */
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
+                          uint64_t z_mask, uint64_t s_mask)
 {
-    uint64_t z_mask = ctx->z_mask;
-    uint64_t s_mask = ctx->s_mask;
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     return true;
 }

+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+    return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
+}
+
 /*
  * An "affected" mask bit is 0 if and only if the result is identical
  * to the first input.  Thus if the entire mask is 0, the operation
--
2.43.0

Consider the passed s_mask to be a minimum deduced either from the
existing s_mask or from a sign-extension operation.  We may be able
to deduce more from the set of known zeros.  Remove identical logic
from several opcode folders.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * Record "zero" and "sign" masks for the single output of @op.
  * See TempOptInfo definition of z_mask and s_mask.
  * If z_mask allows, fold the output to constant zero.
+ * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                           uint64_t z_mask, uint64_t s_mask)
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,

     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask;
+    ti->s_mask = s_mask | smask_from_zmask(z_mask);
     return true;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    s_mask = smask_from_zmask(z_mask);
+    s_mask = 0;

     switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
     case TCG_BSWAP_OZ:
         break;
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         /* The high bits are undefined: force all bits above the sign to 1. */
         z_mask |= sign << 1;
-        s_mask = 0;
         break;
     }
     ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
     ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
         return true;
     }
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);

     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
     }

     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
     if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
     int width = 8 * memop_size(mop);

     if (width < 64) {
-        ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
-        if (!(mop & MO_SIGN)) {
+        if (mop & MO_SIGN) {
+            ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+        } else {
             ctx->z_mask = MAKE_64BIT_MASK(0, width);
-            ctx->s_mask <<= 1;
         }
     }

@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
     fold_setcond_tst_pow2(ctx, op, false);

     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 }

@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
     }

     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;

 do_setcond_const:
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
         break;
     CASE_OP_32_64(ld8u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 8);
-        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
         break;
     CASE_OP_32_64(ld16s):
         ctx->s_mask = MAKE_64BIT_MASK(16, 48);
         break;
     CASE_OP_32_64(ld16u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 16);
-        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
         break;
     case INDEX_op_ld32s_i64:
         ctx->s_mask = MAKE_64BIT_MASK(32, 32);
         break;
     case INDEX_op_ld32u_i64:
         ctx->z_mask = MAKE_64BIT_MASK(0, 32);
-        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
         break;
     default:
         g_assert_not_reached();
--
2.43.0

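A small worked example of the augmentation (illustration only, not QEMU code): every bit that z_mask reports as known-zero equals the sign bit whenever the sign bit itself is known zero, so sign information can be recovered from z_mask and OR-ed into the supplied s_mask.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t z_mask = 0x00ff;   /* value is known to fit in the low 8 bits */
        uint64_t value  = 0x5a;     /* any value consistent with z_mask */

        assert((z_mask >> 63) == 0);     /* bit 63 is known zero: sign bit is 0 */
        assert((value & ~z_mask) == 0);  /* all known-zero bits match that sign bit */
        return 0;
    }
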
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
1
Change the representation from sign bit repetitions to all bits equal
2
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
2
to the sign bit, including the sign bit itself.
3
4
The previous format has a problem in that it is difficult to recreate
5
a valid sign mask after a shift operation: the "repetitions" part of
6
the previous format meant that applying the same shift as for the value
7
lead to an off-by-one value.
8
9
The new format, including the sign bit itself, means that the sign mask
10
can be manipulated in exactly the same way as the value, canonicalization
11
is easier.
12
13
Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
14
to do so. Treat 0 as a non-canonical but typeless input for no sign
15
information, which will be reset as appropriate for the data type.
16
We can easily fold in the data from z_mask while canonicalizing.
17
18
Temporarily disable optimizations using s_mask while each operation is
19
converted to use fold_masks_zs and to the new form.
20
21
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
22
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
23
---
5
tcg/tcg-internal.h | 37 +++
24
tcg/optimize.c | 64 ++++++++++++--------------------------------------
6
tcg/region.c | 572 +++++++++++++++++++++++++++++++++++++++++++++
25
1 file changed, 15 insertions(+), 49 deletions(-)
7
tcg/tcg.c | 547 +------------------------------------------
8
tcg/meson.build | 1 +
9
4 files changed, 613 insertions(+), 544 deletions(-)
10
create mode 100644 tcg/tcg-internal.h
11
create mode 100644 tcg/region.c
12
26
13
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
27
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
new file mode 100644
15
index XXXXXXX..XXXXXXX
16
--- /dev/null
17
+++ b/tcg/tcg-internal.h
18
@@ -XXX,XX +XXX,XX @@
19
+/*
20
+ * Internal declarations for Tiny Code Generator for QEMU
21
+ *
22
+ * Copyright (c) 2008 Fabrice Bellard
23
+ *
24
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
25
+ * of this software and associated documentation files (the "Software"), to deal
26
+ * in the Software without restriction, including without limitation the rights
27
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
28
+ * copies of the Software, and to permit persons to whom the Software is
29
+ * furnished to do so, subject to the following conditions:
30
+ *
31
+ * The above copyright notice and this permission notice shall be included in
32
+ * all copies or substantial portions of the Software.
33
+ *
34
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
35
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
37
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
38
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
39
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
40
+ * THE SOFTWARE.
41
+ */
42
+
43
+#ifndef TCG_INTERNAL_H
44
+#define TCG_INTERNAL_H 1
45
+
46
+#define TCG_HIGHWATER 1024
47
+
48
+extern TCGContext **tcg_ctxs;
49
+extern unsigned int n_tcg_ctxs;
50
+
51
+bool tcg_region_alloc(TCGContext *s);
52
+void tcg_region_initial_alloc(TCGContext *s);
53
+void tcg_region_prologue_set(TCGContext *s);
54
+
55
+#endif /* TCG_INTERNAL_H */
56
diff --git a/tcg/region.c b/tcg/region.c
57
new file mode 100644
58
index XXXXXXX..XXXXXXX
59
--- /dev/null
60
+++ b/tcg/region.c
61
@@ -XXX,XX +XXX,XX @@
62
+/*
63
+ * Memory region management for Tiny Code Generator for QEMU
64
+ *
65
+ * Copyright (c) 2008 Fabrice Bellard
66
+ *
67
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
68
+ * of this software and associated documentation files (the "Software"), to deal
69
+ * in the Software without restriction, including without limitation the rights
70
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
71
+ * copies of the Software, and to permit persons to whom the Software is
72
+ * furnished to do so, subject to the following conditions:
73
+ *
74
+ * The above copyright notice and this permission notice shall be included in
75
+ * all copies or substantial portions of the Software.
76
+ *
77
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
78
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
79
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
80
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
81
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
82
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
83
+ * THE SOFTWARE.
84
+ */
85
+
86
+#include "qemu/osdep.h"
87
+#include "exec/exec-all.h"
88
+#include "tcg/tcg.h"
89
+#if !defined(CONFIG_USER_ONLY)
90
+#include "hw/boards.h"
91
+#endif
92
+#include "tcg-internal.h"
93
+
94
+
95
+struct tcg_region_tree {
96
+ QemuMutex lock;
97
+ GTree *tree;
98
+ /* padding to avoid false sharing is computed at run-time */
99
+};
100
+
101
+/*
102
+ * We divide code_gen_buffer into equally-sized "regions" that TCG threads
103
+ * dynamically allocate from as demand dictates. Given appropriate region
104
+ * sizing, this minimizes flushes even when some TCG threads generate a lot
105
+ * more code than others.
106
+ */
107
+struct tcg_region_state {
108
+ QemuMutex lock;
109
+
110
+ /* fields set at init time */
111
+ void *start;
112
+ void *start_aligned;
113
+ void *end;
114
+ size_t n;
115
+ size_t size; /* size of one region */
116
+ size_t stride; /* .size + guard size */
117
+
118
+ /* fields protected by the lock */
119
+ size_t current; /* current region index */
120
+ size_t agg_size_full; /* aggregate size of full regions */
121
+};
122
+
123
+static struct tcg_region_state region;
124
+
125
+/*
126
+ * This is an array of struct tcg_region_tree's, with padding.
127
+ * We use void * to simplify the computation of region_trees[i]; each
128
+ * struct is found every tree_size bytes.
129
+ */
130
+static void *region_trees;
131
+static size_t tree_size;
132
+
133
+/* compare a pointer @ptr and a tb_tc @s */
134
+static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
135
+{
136
+ if (ptr >= s->ptr + s->size) {
137
+ return 1;
138
+ } else if (ptr < s->ptr) {
139
+ return -1;
140
+ }
141
+ return 0;
142
+}
143
+
144
+static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
145
+{
146
+ const struct tb_tc *a = ap;
147
+ const struct tb_tc *b = bp;
148
+
149
+ /*
150
+ * When both sizes are set, we know this isn't a lookup.
151
+ * This is the most likely case: every TB must be inserted; lookups
152
+ * are a lot less frequent.
153
+ */
154
+ if (likely(a->size && b->size)) {
155
+ if (a->ptr > b->ptr) {
156
+ return 1;
157
+ } else if (a->ptr < b->ptr) {
158
+ return -1;
159
+ }
160
+ /* a->ptr == b->ptr should happen only on deletions */
161
+ g_assert(a->size == b->size);
162
+ return 0;
163
+ }
164
+ /*
165
+ * All lookups have either .size field set to 0.
166
+ * From the glib sources we see that @ap is always the lookup key. However
167
+ * the docs provide no guarantee, so we just mark this case as likely.
168
+ */
169
+ if (likely(a->size == 0)) {
170
+ return ptr_cmp_tb_tc(a->ptr, b);
171
+ }
172
+ return ptr_cmp_tb_tc(b->ptr, a);
173
+}
174
+
175
+static void tcg_region_trees_init(void)
176
+{
177
+ size_t i;
178
+
179
+ tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
180
+ region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
181
+ for (i = 0; i < region.n; i++) {
182
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
183
+
184
+ qemu_mutex_init(&rt->lock);
185
+ rt->tree = g_tree_new(tb_tc_cmp);
186
+ }
187
+}
188
+
189
+static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
190
+{
191
+ size_t region_idx;
192
+
193
+ /*
194
+ * Like tcg_splitwx_to_rw, with no assert. The pc may come from
195
+ * a signal handler over which the caller has no control.
196
+ */
197
+ if (!in_code_gen_buffer(p)) {
198
+ p -= tcg_splitwx_diff;
199
+ if (!in_code_gen_buffer(p)) {
200
+ return NULL;
201
+ }
202
+ }
203
+
204
+ if (p < region.start_aligned) {
205
+ region_idx = 0;
206
+ } else {
207
+ ptrdiff_t offset = p - region.start_aligned;
208
+
209
+ if (offset > region.stride * (region.n - 1)) {
210
+ region_idx = region.n - 1;
211
+ } else {
212
+ region_idx = offset / region.stride;
213
+ }
214
+ }
215
+ return region_trees + region_idx * tree_size;
216
+}
217
+
218
+void tcg_tb_insert(TranslationBlock *tb)
219
+{
220
+ struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
221
+
222
+ g_assert(rt != NULL);
223
+ qemu_mutex_lock(&rt->lock);
224
+ g_tree_insert(rt->tree, &tb->tc, tb);
225
+ qemu_mutex_unlock(&rt->lock);
226
+}
227
+
228
+void tcg_tb_remove(TranslationBlock *tb)
229
+{
230
+ struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
231
+
232
+ g_assert(rt != NULL);
233
+ qemu_mutex_lock(&rt->lock);
234
+ g_tree_remove(rt->tree, &tb->tc);
235
+ qemu_mutex_unlock(&rt->lock);
236
+}
237
+
238
+/*
239
+ * Find the TB 'tb' such that
240
+ * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
241
+ * Return NULL if not found.
242
+ */
243
+TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
244
+{
245
+ struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
246
+ TranslationBlock *tb;
247
+ struct tb_tc s = { .ptr = (void *)tc_ptr };
248
+
249
+ if (rt == NULL) {
250
+ return NULL;
251
+ }
252
+
253
+ qemu_mutex_lock(&rt->lock);
254
+ tb = g_tree_lookup(rt->tree, &s);
255
+ qemu_mutex_unlock(&rt->lock);
256
+ return tb;
257
+}
258
+
259
+static void tcg_region_tree_lock_all(void)
260
+{
261
+ size_t i;
262
+
263
+ for (i = 0; i < region.n; i++) {
264
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
265
+
266
+ qemu_mutex_lock(&rt->lock);
267
+ }
268
+}
269
+
270
+static void tcg_region_tree_unlock_all(void)
271
+{
272
+ size_t i;
273
+
274
+ for (i = 0; i < region.n; i++) {
275
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
276
+
277
+ qemu_mutex_unlock(&rt->lock);
278
+ }
279
+}
280
+
281
+void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
282
+{
283
+ size_t i;
284
+
285
+ tcg_region_tree_lock_all();
286
+ for (i = 0; i < region.n; i++) {
287
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
288
+
289
+ g_tree_foreach(rt->tree, func, user_data);
290
+ }
291
+ tcg_region_tree_unlock_all();
292
+}
293
+
294
+size_t tcg_nb_tbs(void)
295
+{
296
+ size_t nb_tbs = 0;
297
+ size_t i;
298
+
299
+ tcg_region_tree_lock_all();
300
+ for (i = 0; i < region.n; i++) {
301
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
302
+
303
+ nb_tbs += g_tree_nnodes(rt->tree);
304
+ }
305
+ tcg_region_tree_unlock_all();
306
+ return nb_tbs;
307
+}
308
+
309
+static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data)
310
+{
311
+ TranslationBlock *tb = v;
312
+
313
+ tb_destroy(tb);
314
+ return FALSE;
315
+}
316
+
317
+static void tcg_region_tree_reset_all(void)
318
+{
319
+ size_t i;
320
+
321
+ tcg_region_tree_lock_all();
322
+ for (i = 0; i < region.n; i++) {
323
+ struct tcg_region_tree *rt = region_trees + i * tree_size;
324
+
325
+ g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
326
+ /* Increment the refcount first so that destroy acts as a reset */
327
+ g_tree_ref(rt->tree);
328
+ g_tree_destroy(rt->tree);
329
+ }
330
+ tcg_region_tree_unlock_all();
331
+}
332
+
333
+static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
334
+{
335
+ void *start, *end;
336
+
337
+ start = region.start_aligned + curr_region * region.stride;
338
+ end = start + region.size;
339
+
340
+ if (curr_region == 0) {
341
+ start = region.start;
342
+ }
343
+ if (curr_region == region.n - 1) {
344
+ end = region.end;
345
+ }
346
+
347
+ *pstart = start;
348
+ *pend = end;
349
+}
350
+
351
+static void tcg_region_assign(TCGContext *s, size_t curr_region)
352
+{
353
+ void *start, *end;
354
+
355
+ tcg_region_bounds(curr_region, &start, &end);
356
+
357
+ s->code_gen_buffer = start;
358
+ s->code_gen_ptr = start;
359
+ s->code_gen_buffer_size = end - start;
360
+ s->code_gen_highwater = end - TCG_HIGHWATER;
361
+}
362
+
363
+static bool tcg_region_alloc__locked(TCGContext *s)
364
+{
365
+ if (region.current == region.n) {
366
+ return true;
367
+ }
368
+ tcg_region_assign(s, region.current);
369
+ region.current++;
370
+ return false;
371
+}
372
+
373
+/*
374
+ * Request a new region once the one in use has filled up.
375
+ * Returns true on error.
376
+ */
377
+bool tcg_region_alloc(TCGContext *s)
378
+{
379
+ bool err;
380
+ /* read the region size now; alloc__locked will overwrite it on success */
381
+ size_t size_full = s->code_gen_buffer_size;
382
+
383
+ qemu_mutex_lock(&region.lock);
384
+ err = tcg_region_alloc__locked(s);
385
+ if (!err) {
386
+ region.agg_size_full += size_full - TCG_HIGHWATER;
387
+ }
388
+ qemu_mutex_unlock(&region.lock);
389
+ return err;
390
+}
391
+
392
+/*
393
+ * Perform a context's first region allocation.
394
+ * This function does _not_ increment region.agg_size_full.
395
+ */
396
+static void tcg_region_initial_alloc__locked(TCGContext *s)
397
+{
398
+ bool err = tcg_region_alloc__locked(s);
399
+ g_assert(!err);
400
+}
401
+
402
+void tcg_region_initial_alloc(TCGContext *s)
403
+{
404
+ qemu_mutex_lock(&region.lock);
405
+ tcg_region_initial_alloc__locked(s);
406
+ qemu_mutex_unlock(&region.lock);
407
+}
408
+
409
+/* Call from a safe-work context */
410
+void tcg_region_reset_all(void)
411
+{
412
+ unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
413
+ unsigned int i;
414
+
415
+ qemu_mutex_lock(&region.lock);
416
+ region.current = 0;
417
+ region.agg_size_full = 0;
418
+
419
+ for (i = 0; i < n_ctxs; i++) {
420
+ TCGContext *s = qatomic_read(&tcg_ctxs[i]);
421
+ tcg_region_initial_alloc__locked(s);
422
+ }
423
+ qemu_mutex_unlock(&region.lock);
424
+
425
+ tcg_region_tree_reset_all();
426
+}
427
+
428
+#ifdef CONFIG_USER_ONLY
429
+static size_t tcg_n_regions(void)
430
+{
431
+ return 1;
432
+}
433
+#else
434
+/*
435
+ * It is likely that some vCPUs will translate more code than others, so we
436
+ * first try to set more regions than max_cpus, with those regions being of
437
+ * reasonable size. If that's not possible we make do by evenly dividing
438
+ * the code_gen_buffer among the vCPUs.
439
+ */
440
+static size_t tcg_n_regions(void)
441
+{
442
+ size_t i;
443
+
444
+ /* Use a single region if all we have is one vCPU thread */
445
+#if !defined(CONFIG_USER_ONLY)
446
+ MachineState *ms = MACHINE(qdev_get_machine());
447
+ unsigned int max_cpus = ms->smp.max_cpus;
448
+#endif
449
+ if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
450
+ return 1;
451
+ }
452
+
453
+ /* Try to have more regions than max_cpus, with each region being >= 2 MB */
454
+ for (i = 8; i > 0; i--) {
455
+ size_t regions_per_thread = i;
456
+ size_t region_size;
457
+
458
+ region_size = tcg_init_ctx.code_gen_buffer_size;
459
+ region_size /= max_cpus * regions_per_thread;
460
+
461
+ if (region_size >= 2 * 1024u * 1024) {
462
+ return max_cpus * regions_per_thread;
463
+ }
464
+ }
465
+ /* If we can't, then just allocate one region per vCPU thread */
466
+ return max_cpus;
467
+}
468
+#endif
469
+
470
+/*
+ * Initializes region partitioning.
+ *
+ * Called at init time from the parent thread (i.e. the one calling
+ * tcg_context_init), after the target's TCG globals have been set.
+ *
+ * Region partitioning works by splitting code_gen_buffer into separate regions,
+ * and then assigning regions to TCG threads so that the threads can translate
+ * code in parallel without synchronization.
+ *
+ * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
+ * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
+ * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
+ * must have been parsed before calling this function, since it calls
+ * qemu_tcg_mttcg_enabled().
+ *
+ * In user-mode we use a single region. Having multiple regions in user-mode
+ * is not supported, because the number of vCPU threads (recall that each thread
+ * spawned by the guest corresponds to a vCPU thread) is only bounded by the
+ * OS, and usually this number is huge (tens of thousands is not uncommon).
+ * Thus, given this large bound on the number of vCPU threads and the fact
+ * that code_gen_buffer is allocated at compile-time, we cannot guarantee
+ * the availability of at least one region per vCPU thread.
+ *
+ * However, this user-mode limitation is unlikely to be a significant problem
+ * in practice. Multi-threaded guests share most if not all of their translated
+ * code, which makes parallel code generation less appealing than in softmmu.
+ */
+void tcg_region_init(void)
499
+{
500
+ void *buf = tcg_init_ctx.code_gen_buffer;
501
+ void *aligned;
502
+ size_t size = tcg_init_ctx.code_gen_buffer_size;
503
+ size_t page_size = qemu_real_host_page_size;
504
+ size_t region_size;
505
+ size_t n_regions;
506
+ size_t i;
507
+
508
+ n_regions = tcg_n_regions();
509
+
510
+ /* The first region will be 'aligned - buf' bytes larger than the others */
511
+ aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
512
+ g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
513
+ /*
514
+ * Make region_size a multiple of page_size, using aligned as the start.
515
+ * As a result of this we might end up with a few extra pages at the end of
516
+ * the buffer; we will assign those to the last region.
517
+ */
518
+ region_size = (size - (aligned - buf)) / n_regions;
519
+ region_size = QEMU_ALIGN_DOWN(region_size, page_size);
520
+
521
+ /* A region must have at least 2 pages; one code, one guard */
522
+ g_assert(region_size >= 2 * page_size);
523
+
524
+ /* init the region struct */
525
+ qemu_mutex_init(&region.lock);
526
+ region.n = n_regions;
527
+ region.size = region_size - page_size;
528
+ region.stride = region_size;
529
+ region.start = buf;
530
+ region.start_aligned = aligned;
531
+ /* page-align the end, since its last page will be a guard page */
532
+ region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
533
+ /* account for that last guard page */
534
+ region.end -= page_size;
535
+
536
+ /*
537
+ * Set guard pages in the rw buffer, as that's the one into which
538
+ * buffer overruns could occur. Do not set guard pages in the rx
539
+ * buffer -- let that one use hugepages throughout.
540
+ */
541
+ for (i = 0; i < region.n; i++) {
542
+ void *start, *end;
543
+
544
+ tcg_region_bounds(i, &start, &end);
545
+
546
+ /*
547
+ * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
548
+ * rejects a permission change from RWX -> NONE. Guard pages are
549
+ * nice for bug detection but are not essential; ignore any failure.
550
+ */
551
+ (void)qemu_mprotect_none(end, page_size);
552
+ }
553
+
554
+ tcg_region_trees_init();
555
+
556
+ /*
557
+ * Leave the initial context initialized to the first region.
558
+ * This will be the context into which we generate the prologue.
559
+ * It is also the only context for CONFIG_USER_ONLY.
560
+ */
561
+ tcg_region_initial_alloc__locked(&tcg_init_ctx);
562
+}
563
+
564
+void tcg_region_prologue_set(TCGContext *s)
565
+{
566
+ /* Deduct the prologue from the first region. */
567
+ g_assert(region.start == s->code_gen_buffer);
568
+ region.start = s->code_ptr;
569
+
570
+ /* Recompute boundaries of the first region. */
571
+ tcg_region_assign(s, 0);
572
+
573
+ /* Register the balance of the buffer with gdb. */
574
+ tcg_register_jit(tcg_splitwx_to_rx(region.start),
575
+ region.end - region.start);
576
+}
577
+
578
+/*
+ * Returns the size (in bytes) of all translated code (i.e. from all regions)
+ * currently in the cache.
+ * See also: tcg_code_capacity()
+ * Do not confuse with tcg_current_code_size(); that one applies to a single
+ * TCG context.
+ */
+size_t tcg_code_size(void)
586
+{
587
+ unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
588
+ unsigned int i;
589
+ size_t total;
590
+
591
+ qemu_mutex_lock(&region.lock);
592
+ total = region.agg_size_full;
593
+ for (i = 0; i < n_ctxs; i++) {
594
+ const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
595
+ size_t size;
596
+
597
+ size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
598
+ g_assert(size <= s->code_gen_buffer_size);
599
+ total += size;
600
+ }
601
+ qemu_mutex_unlock(&region.lock);
602
+ return total;
603
+}
604
+
605
+/*
+ * Returns the code capacity (in bytes) of the entire cache, i.e. including all
+ * regions.
+ * See also: tcg_code_size()
+ */
+size_t tcg_code_capacity(void)
611
+{
612
+ size_t guard_size, capacity;
613
+
614
+ /* no need for synchronization; these variables are set at init time */
615
+ guard_size = region.stride - region.size;
616
+ capacity = region.end + guard_size - region.start;
617
+ capacity -= region.n * (guard_size + TCG_HIGHWATER);
618
+ return capacity;
619
+}
620
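(Sketch only, not part of the patch: the two accessors above combine naturally
into a utilization figure, e.g. for "info jit"-style statistics; the function
name is illustrative.)

    /* Fraction of the translation cache currently in use. */
    double tcg_code_utilization(void)
    {
        return (double)tcg_code_size() / (double)tcg_code_capacity();
    }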
+
621
+size_t tcg_tb_phys_invalidate_count(void)
622
+{
623
+ unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
624
+ unsigned int i;
625
+ size_t total = 0;
626
+
627
+ for (i = 0; i < n_ctxs; i++) {
628
+ const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
629
+
630
+ total += qatomic_read(&s->tb_phys_invalidate_count);
631
+ }
632
+ return total;
633
+}
634
diff --git a/tcg/tcg.c b/tcg/tcg.c
635
index XXXXXXX..XXXXXXX 100644
28
index XXXXXXX..XXXXXXX 100644
636
--- a/tcg/tcg.c
29
--- a/tcg/optimize.c
637
+++ b/tcg/tcg.c
30
+++ b/tcg/optimize.c
638
@@ -XXX,XX +XXX,XX @@
31
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
639
32
QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
640
#include "elf.h"
33
uint64_t val;
641
#include "exec/log.h"
34
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
642
+#include "tcg-internal.h"
35
- uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
643
36
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
644
/* Forward declarations for functions declared in tcg-target.c.inc and
37
} TempOptInfo;
645
used here. */
38
646
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
39
typedef struct OptContext {
647
static int tcg_out_ldst_finalize(TCGContext *s);
40
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
648
#endif
41
649
42
/* In flight values from optimization. */
650
-#define TCG_HIGHWATER 1024
43
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
651
-
44
- uint64_t s_mask; /* mask of clrsb(value) bits */
652
-static TCGContext **tcg_ctxs;
45
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
653
-static unsigned int n_tcg_ctxs;
46
TCGType type;
654
+TCGContext **tcg_ctxs;
47
} OptContext;
655
+unsigned int n_tcg_ctxs;
48
656
TCGv_env cpu_env = 0;
49
-/* Calculate the smask for a specific value. */
657
const void *tcg_code_gen_epilogue;
50
-static uint64_t smask_from_value(uint64_t value)
658
uintptr_t tcg_splitwx_diff;
659
@@ -XXX,XX +XXX,XX @@ uintptr_t tcg_splitwx_diff;
660
tcg_prologue_fn *tcg_qemu_tb_exec;
661
#endif
662
663
-struct tcg_region_tree {
664
- QemuMutex lock;
665
- GTree *tree;
666
- /* padding to avoid false sharing is computed at run-time */
667
-};
668
-
669
-/*
670
- * We divide code_gen_buffer into equally-sized "regions" that TCG threads
671
- * dynamically allocate from as demand dictates. Given appropriate region
672
- * sizing, this minimizes flushes even when some TCG threads generate a lot
673
- * more code than others.
674
- */
675
-struct tcg_region_state {
676
- QemuMutex lock;
677
-
678
- /* fields set at init time */
679
- void *start;
680
- void *start_aligned;
681
- void *end;
682
- size_t n;
683
- size_t size; /* size of one region */
684
- size_t stride; /* .size + guard size */
685
-
686
- /* fields protected by the lock */
687
- size_t current; /* current region index */
688
- size_t agg_size_full; /* aggregate size of full regions */
689
-};
690
-
691
-static struct tcg_region_state region;
692
-/*
693
- * This is an array of struct tcg_region_tree's, with padding.
694
- * We use void * to simplify the computation of region_trees[i]; each
695
- * struct is found every tree_size bytes.
696
- */
697
-static void *region_trees;
698
-static size_t tree_size;
699
static TCGRegSet tcg_target_available_regs[TCG_TYPE_COUNT];
700
static TCGRegSet tcg_target_call_clobber_regs;
701
702
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef constraint_sets[] = {
703
704
#include "tcg-target.c.inc"
705
706
-/* compare a pointer @ptr and a tb_tc @s */
707
-static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
708
-{
51
-{
709
- if (ptr >= s->ptr + s->size) {
52
- int rep = clrsb64(value);
710
- return 1;
53
- return ~(~0ull >> rep);
711
- } else if (ptr < s->ptr) {
712
- return -1;
713
- }
714
- return 0;
715
-}
716
-
717
-static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
718
-{
719
- const struct tb_tc *a = ap;
720
- const struct tb_tc *b = bp;
721
-
722
- /*
723
- * When both sizes are set, we know this isn't a lookup.
724
- * This is the most likely case: every TB must be inserted; lookups
725
- * are a lot less frequent.
726
- */
727
- if (likely(a->size && b->size)) {
728
- if (a->ptr > b->ptr) {
729
- return 1;
730
- } else if (a->ptr < b->ptr) {
731
- return -1;
732
- }
733
- /* a->ptr == b->ptr should happen only on deletions */
734
- g_assert(a->size == b->size);
735
- return 0;
736
- }
737
- /*
738
- * All lookups have either .size field set to 0.
739
- * From the glib sources we see that @ap is always the lookup key. However
740
- * the docs provide no guarantee, so we just mark this case as likely.
741
- */
742
- if (likely(a->size == 0)) {
743
- return ptr_cmp_tb_tc(a->ptr, b);
744
- }
745
- return ptr_cmp_tb_tc(b->ptr, a);
746
-}
747
-
748
-static void tcg_region_trees_init(void)
749
-{
750
- size_t i;
751
-
752
- tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
753
- region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
754
- for (i = 0; i < region.n; i++) {
755
- struct tcg_region_tree *rt = region_trees + i * tree_size;
756
-
757
- qemu_mutex_init(&rt->lock);
758
- rt->tree = g_tree_new(tb_tc_cmp);
759
- }
760
-}
761
-
762
-static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
763
-{
764
- size_t region_idx;
765
-
766
- /*
767
- * Like tcg_splitwx_to_rw, with no assert. The pc may come from
768
- * a signal handler over which the caller has no control.
769
- */
770
- if (!in_code_gen_buffer(p)) {
771
- p -= tcg_splitwx_diff;
772
- if (!in_code_gen_buffer(p)) {
773
- return NULL;
774
- }
775
- }
776
-
777
- if (p < region.start_aligned) {
778
- region_idx = 0;
779
- } else {
780
- ptrdiff_t offset = p - region.start_aligned;
781
-
782
- if (offset > region.stride * (region.n - 1)) {
783
- region_idx = region.n - 1;
784
- } else {
785
- region_idx = offset / region.stride;
786
- }
787
- }
788
- return region_trees + region_idx * tree_size;
789
-}
790
-
791
-void tcg_tb_insert(TranslationBlock *tb)
792
-{
793
- struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
794
-
795
- g_assert(rt != NULL);
796
- qemu_mutex_lock(&rt->lock);
797
- g_tree_insert(rt->tree, &tb->tc, tb);
798
- qemu_mutex_unlock(&rt->lock);
799
-}
800
-
801
-void tcg_tb_remove(TranslationBlock *tb)
802
-{
803
- struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
804
-
805
- g_assert(rt != NULL);
806
- qemu_mutex_lock(&rt->lock);
807
- g_tree_remove(rt->tree, &tb->tc);
808
- qemu_mutex_unlock(&rt->lock);
809
-}
54
-}
810
-
55
-
811
-/*
56
-/*
812
- * Find the TB 'tb' such that
57
- * Calculate the smask for a given set of known-zeros.
813
- * tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
58
- * If there are lots of zeros on the left, we can consider the remainder
814
- * Return NULL if not found.
59
- * an unsigned field, and thus the corresponding signed field is one bit
60
- * larger.
815
- */
61
- */
816
-TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
62
-static uint64_t smask_from_zmask(uint64_t zmask)
817
-{
63
-{
818
- struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
64
- /*
819
- TranslationBlock *tb;
65
- * Only the 0 bits are significant for zmask, thus the msb itself
820
- struct tb_tc s = { .ptr = (void *)tc_ptr };
66
- * must be zero, else we have no sign information.
821
-
67
- */
822
- if (rt == NULL) {
68
- int rep = clz64(zmask);
823
- return NULL;
69
- if (rep == 0) {
70
- return 0;
824
- }
71
- }
825
-
72
- rep -= 1;
826
- qemu_mutex_lock(&rt->lock);
73
- return ~(~0ull >> rep);
827
- tb = g_tree_lookup(rt->tree, &s);
828
- qemu_mutex_unlock(&rt->lock);
829
- return tb;
830
-}
831
-
832
-static void tcg_region_tree_lock_all(void)
833
-{
834
- size_t i;
835
-
836
- for (i = 0; i < region.n; i++) {
837
- struct tcg_region_tree *rt = region_trees + i * tree_size;
838
-
839
- qemu_mutex_lock(&rt->lock);
840
- }
841
-}
842
-
843
-static void tcg_region_tree_unlock_all(void)
844
-{
845
- size_t i;
846
-
847
- for (i = 0; i < region.n; i++) {
848
- struct tcg_region_tree *rt = region_trees + i * tree_size;
849
-
850
- qemu_mutex_unlock(&rt->lock);
851
- }
852
-}
853
-
854
-void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
855
-{
856
- size_t i;
857
-
858
- tcg_region_tree_lock_all();
859
- for (i = 0; i < region.n; i++) {
860
- struct tcg_region_tree *rt = region_trees + i * tree_size;
861
-
862
- g_tree_foreach(rt->tree, func, user_data);
863
- }
864
- tcg_region_tree_unlock_all();
865
-}
866
-
867
-size_t tcg_nb_tbs(void)
868
-{
869
- size_t nb_tbs = 0;
870
- size_t i;
871
-
872
- tcg_region_tree_lock_all();
873
- for (i = 0; i < region.n; i++) {
874
- struct tcg_region_tree *rt = region_trees + i * tree_size;
875
-
876
- nb_tbs += g_tree_nnodes(rt->tree);
877
- }
878
- tcg_region_tree_unlock_all();
879
- return nb_tbs;
880
-}
881
-
882
-static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data)
883
-{
884
- TranslationBlock *tb = v;
885
-
886
- tb_destroy(tb);
887
- return FALSE;
888
-}
889
-
890
-static void tcg_region_tree_reset_all(void)
891
-{
892
- size_t i;
893
-
894
- tcg_region_tree_lock_all();
895
- for (i = 0; i < region.n; i++) {
896
- struct tcg_region_tree *rt = region_trees + i * tree_size;
897
-
898
- g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
899
- /* Increment the refcount first so that destroy acts as a reset */
900
- g_tree_ref(rt->tree);
901
- g_tree_destroy(rt->tree);
902
- }
903
- tcg_region_tree_unlock_all();
904
-}
905
-
906
-static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
907
-{
908
- void *start, *end;
909
-
910
- start = region.start_aligned + curr_region * region.stride;
911
- end = start + region.size;
912
-
913
- if (curr_region == 0) {
914
- start = region.start;
915
- }
916
- if (curr_region == region.n - 1) {
917
- end = region.end;
918
- }
919
-
920
- *pstart = start;
921
- *pend = end;
922
-}
923
-
924
-static void tcg_region_assign(TCGContext *s, size_t curr_region)
925
-{
926
- void *start, *end;
927
-
928
- tcg_region_bounds(curr_region, &start, &end);
929
-
930
- s->code_gen_buffer = start;
931
- s->code_gen_ptr = start;
932
- s->code_gen_buffer_size = end - start;
933
- s->code_gen_highwater = end - TCG_HIGHWATER;
934
-}
935
-
936
-static bool tcg_region_alloc__locked(TCGContext *s)
937
-{
938
- if (region.current == region.n) {
939
- return true;
940
- }
941
- tcg_region_assign(s, region.current);
942
- region.current++;
943
- return false;
944
-}
74
-}
945
-
75
-
946
-/*
76
-/*
947
- * Request a new region once the one in use has filled up.
77
- * Recreate a properly left-aligned smask after manipulation.
948
- * Returns true on error.
78
- * Some bit-shuffling, particularly shifts and rotates, may
79
- * retain sign bits on the left, but may scatter disconnected
80
- * sign bits on the right. Retain only what remains to the left.
949
- */
81
- */
950
-static bool tcg_region_alloc(TCGContext *s)
82
-static uint64_t smask_from_smask(int64_t smask)
951
-{
83
-{
952
- bool err;
84
- /* Only the 1 bits are significant for smask */
953
- /* read the region size now; alloc__locked will overwrite it on success */
85
- return smask_from_zmask(~smask);
954
- size_t size_full = s->code_gen_buffer_size;
955
-
956
- qemu_mutex_lock(&region.lock);
957
- err = tcg_region_alloc__locked(s);
958
- if (!err) {
959
- region.agg_size_full += size_full - TCG_HIGHWATER;
960
- }
961
- qemu_mutex_unlock(&region.lock);
962
- return err;
963
-}
86
-}
964
-
87
-
965
-/*
88
static inline TempOptInfo *ts_info(TCGTemp *ts)
966
- * Perform a context's first region allocation.
967
- * This function does _not_ increment region.agg_size_full.
968
- */
969
-static void tcg_region_initial_alloc__locked(TCGContext *s)
970
-{
971
- bool err = tcg_region_alloc__locked(s);
972
- g_assert(!err);
973
-}
974
-
975
-#ifndef CONFIG_USER_ONLY
976
-static void tcg_region_initial_alloc(TCGContext *s)
977
-{
978
- qemu_mutex_lock(&region.lock);
979
- tcg_region_initial_alloc__locked(s);
980
- qemu_mutex_unlock(&region.lock);
981
-}
982
-#endif
983
-
984
-/* Call from a safe-work context */
985
-void tcg_region_reset_all(void)
986
-{
987
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
988
- unsigned int i;
989
-
990
- qemu_mutex_lock(&region.lock);
991
- region.current = 0;
992
- region.agg_size_full = 0;
993
-
994
- for (i = 0; i < n_ctxs; i++) {
995
- TCGContext *s = qatomic_read(&tcg_ctxs[i]);
996
- tcg_region_initial_alloc__locked(s);
997
- }
998
- qemu_mutex_unlock(&region.lock);
999
-
1000
- tcg_region_tree_reset_all();
1001
-}
1002
-
1003
-#ifdef CONFIG_USER_ONLY
1004
-static size_t tcg_n_regions(void)
1005
-{
1006
- return 1;
1007
-}
1008
-#else
1009
-/*
1010
- * It is likely that some vCPUs will translate more code than others, so we
1011
- * first try to set more regions than max_cpus, with those regions being of
1012
- * reasonable size. If that's not possible we make do by evenly dividing
1013
- * the code_gen_buffer among the vCPUs.
1014
- */
1015
-static size_t tcg_n_regions(void)
1016
-{
1017
- size_t i;
1018
-
1019
- /* Use a single region if all we have is one vCPU thread */
1020
-#if !defined(CONFIG_USER_ONLY)
1021
- MachineState *ms = MACHINE(qdev_get_machine());
1022
- unsigned int max_cpus = ms->smp.max_cpus;
1023
-#endif
1024
- if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
1025
- return 1;
1026
- }
1027
-
1028
- /* Try to have more regions than max_cpus, with each region being >= 2 MB */
1029
- for (i = 8; i > 0; i--) {
1030
- size_t regions_per_thread = i;
1031
- size_t region_size;
1032
-
1033
- region_size = tcg_init_ctx.code_gen_buffer_size;
1034
- region_size /= max_cpus * regions_per_thread;
1035
-
1036
- if (region_size >= 2 * 1024u * 1024) {
1037
- return max_cpus * regions_per_thread;
1038
- }
1039
- }
1040
- /* If we can't, then just allocate one region per vCPU thread */
1041
- return max_cpus;
1042
-}
1043
-#endif
1044
-
1045
-/*
1046
- * Initializes region partitioning.
1047
- *
1048
- * Called at init time from the parent thread (i.e. the one calling
1049
- * tcg_context_init), after the target's TCG globals have been set.
1050
- *
1051
- * Region partitioning works by splitting code_gen_buffer into separate regions,
1052
- * and then assigning regions to TCG threads so that the threads can translate
1053
- * code in parallel without synchronization.
1054
- *
1055
- * In softmmu the number of TCG threads is bounded by max_cpus, so we use at
1056
- * least max_cpus regions in MTTCG. In !MTTCG we use a single region.
1057
- * Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
1058
- * must have been parsed before calling this function, since it calls
1059
- * qemu_tcg_mttcg_enabled().
1060
- *
1061
- * In user-mode we use a single region. Having multiple regions in user-mode
1062
- * is not supported, because the number of vCPU threads (recall that each thread
1063
- * spawned by the guest corresponds to a vCPU thread) is only bounded by the
1064
- * OS, and usually this number is huge (tens of thousands is not uncommon).
1065
- * Thus, given this large bound on the number of vCPU threads and the fact
1066
- * that code_gen_buffer is allocated at compile-time, we cannot guarantee
1067
- * that the availability of at least one region per vCPU thread.
1068
- *
1069
- * However, this user-mode limitation is unlikely to be a significant problem
1070
- * in practice. Multi-threaded guests share most if not all of their translated
1071
- * code, which makes parallel code generation less appealing than in softmmu.
1072
- */
1073
-void tcg_region_init(void)
1074
-{
1075
- void *buf = tcg_init_ctx.code_gen_buffer;
1076
- void *aligned;
1077
- size_t size = tcg_init_ctx.code_gen_buffer_size;
1078
- size_t page_size = qemu_real_host_page_size;
1079
- size_t region_size;
1080
- size_t n_regions;
1081
- size_t i;
1082
-
1083
- n_regions = tcg_n_regions();
1084
-
1085
- /* The first region will be 'aligned - buf' bytes larger than the others */
1086
- aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
1087
- g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
1088
- /*
1089
- * Make region_size a multiple of page_size, using aligned as the start.
1090
- * As a result of this we might end up with a few extra pages at the end of
1091
- * the buffer; we will assign those to the last region.
1092
- */
1093
- region_size = (size - (aligned - buf)) / n_regions;
1094
- region_size = QEMU_ALIGN_DOWN(region_size, page_size);
1095
-
1096
- /* A region must have at least 2 pages; one code, one guard */
1097
- g_assert(region_size >= 2 * page_size);
1098
-
1099
- /* init the region struct */
1100
- qemu_mutex_init(&region.lock);
1101
- region.n = n_regions;
1102
- region.size = region_size - page_size;
1103
- region.stride = region_size;
1104
- region.start = buf;
1105
- region.start_aligned = aligned;
1106
- /* page-align the end, since its last page will be a guard page */
1107
- region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
1108
- /* account for that last guard page */
1109
- region.end -= page_size;
1110
-
1111
- /*
1112
- * Set guard pages in the rw buffer, as that's the one into which
1113
- * buffer overruns could occur. Do not set guard pages in the rx
1114
- * buffer -- let that one use hugepages throughout.
1115
- */
1116
- for (i = 0; i < region.n; i++) {
1117
- void *start, *end;
1118
-
1119
- tcg_region_bounds(i, &start, &end);
1120
-
1121
- /*
1122
- * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
1123
- * rejects a permission change from RWX -> NONE. Guard pages are
1124
- * nice for bug detection but are not essential; ignore any failure.
1125
- */
1126
- (void)qemu_mprotect_none(end, page_size);
1127
- }
1128
-
1129
- tcg_region_trees_init();
1130
-
1131
- /*
1132
- * Leave the initial context initialized to the first region.
1133
- * This will be the context into which we generate the prologue.
1134
- * It is also the only context for CONFIG_USER_ONLY.
1135
- */
1136
- tcg_region_initial_alloc__locked(&tcg_init_ctx);
1137
-}
1138
-
1139
-static void tcg_region_prologue_set(TCGContext *s)
1140
-{
1141
- /* Deduct the prologue from the first region. */
1142
- g_assert(region.start == s->code_gen_buffer);
1143
- region.start = s->code_ptr;
1144
-
1145
- /* Recompute boundaries of the first region. */
1146
- tcg_region_assign(s, 0);
1147
-
1148
- /* Register the balance of the buffer with gdb. */
1149
- tcg_register_jit(tcg_splitwx_to_rx(region.start),
1150
- region.end - region.start);
1151
-}
1152
-
1153
#ifdef CONFIG_DEBUG_TCG
1154
const void *tcg_splitwx_to_rx(void *rw)
1155
{
89
{
1156
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void)
90
return ts->state_ptr;
91
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
92
ti->is_const = true;
93
ti->val = ts->val;
94
ti->z_mask = ts->val;
95
- ti->s_mask = smask_from_value(ts->val);
96
+ ti->s_mask = INT64_MIN >> clrsb64(ts->val);
97
} else {
98
ti->is_const = false;
99
ti->z_mask = -1;
100
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
101
*/
102
if (i == 0) {
103
ts_info(ts)->z_mask = ctx->z_mask;
104
- ts_info(ts)->s_mask = ctx->s_mask;
105
}
106
}
1157
}
107
}
1158
#endif /* !CONFIG_USER_ONLY */
108
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
1159
109
* The passed s_mask may be augmented by z_mask.
1160
-/*
110
*/
1161
- * Returns the size (in bytes) of all translated code (i.e. from all regions)
111
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
1162
- * currently in the cache.
112
- uint64_t z_mask, uint64_t s_mask)
1163
- * See also: tcg_code_capacity()
113
+ uint64_t z_mask, int64_t s_mask)
1164
- * Do not confuse with tcg_current_code_size(); that one applies to a single
1165
- * TCG context.
1166
- */
1167
-size_t tcg_code_size(void)
1168
-{
1169
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
1170
- unsigned int i;
1171
- size_t total;
1172
-
1173
- qemu_mutex_lock(&region.lock);
1174
- total = region.agg_size_full;
1175
- for (i = 0; i < n_ctxs; i++) {
1176
- const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
1177
- size_t size;
1178
-
1179
- size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
1180
- g_assert(size <= s->code_gen_buffer_size);
1181
- total += size;
1182
- }
1183
- qemu_mutex_unlock(&region.lock);
1184
- return total;
1185
-}
1186
-
1187
-/*
1188
- * Returns the code capacity (in bytes) of the entire cache, i.e. including all
1189
- * regions.
1190
- * See also: tcg_code_size()
1191
- */
1192
-size_t tcg_code_capacity(void)
1193
-{
1194
- size_t guard_size, capacity;
1195
-
1196
- /* no need for synchronization; these variables are set at init time */
1197
- guard_size = region.stride - region.size;
1198
- capacity = region.end + guard_size - region.start;
1199
- capacity -= region.n * (guard_size + TCG_HIGHWATER);
1200
- return capacity;
1201
-}
1202
-
1203
-size_t tcg_tb_phys_invalidate_count(void)
1204
-{
1205
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
1206
- unsigned int i;
1207
- size_t total = 0;
1208
-
1209
- for (i = 0; i < n_ctxs; i++) {
1210
- const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
1211
-
1212
- total += qatomic_read(&s->tb_phys_invalidate_count);
1213
- }
1214
- return total;
1215
-}
1216
-
1217
/* pool based memory allocation */
1218
void *tcg_malloc_internal(TCGContext *s, int size)
1219
{
114
{
1220
diff --git a/tcg/meson.build b/tcg/meson.build
115
const TCGOpDef *def = &tcg_op_defs[op->opc];
1221
index XXXXXXX..XXXXXXX 100644
116
TCGTemp *ts;
1222
--- a/tcg/meson.build
117
TempOptInfo *ti;
1223
+++ b/tcg/meson.build
118
+ int rep;
1224
@@ -XXX,XX +XXX,XX @@ tcg_ss = ss.source_set()
119
1225
120
/* Only single-output opcodes are supported here. */
1226
tcg_ss.add(files(
121
tcg_debug_assert(def->nb_oargs == 1);
1227
'optimize.c',
122
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
1228
+ 'region.c',
123
*/
1229
'tcg.c',
124
if (ctx->type == TCG_TYPE_I32) {
1230
'tcg-common.c',
125
z_mask = (int32_t)z_mask;
1231
'tcg-op.c',
126
- s_mask |= MAKE_64BIT_MASK(32, 32);
127
+ s_mask |= INT32_MIN;
128
}
129
130
if (z_mask == 0) {
131
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
132
133
ti = ts_info(ts);
134
ti->z_mask = z_mask;
135
- ti->s_mask = s_mask | smask_from_zmask(z_mask);
136
+
137
+ /* Canonicalize s_mask and incorporate data from z_mask. */
138
+ rep = clz64(~s_mask);
139
+ rep = MAX(rep, clz64(z_mask));
140
+ rep = MAX(rep - 1, 0);
141
+ ti->s_mask = INT64_MIN >> rep;
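    /*
     * Illustrative values, not part of the patch: with z_mask == 0xffffffff
     * and no incoming sign information (s_mask == INT64_MIN), rep ends up
     * as 31 and the stored s_mask becomes 0xffffffff00000000, i.e. bits
     * 32..63 are known to repeat bit 63.
     */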
142
+
143
return true;
144
}
145
146
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
147
148
ctx->z_mask = z_mask;
149
ctx->s_mask = s_mask;
150
- if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
151
+ if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
152
return true;
153
}
154
155
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
156
s_mask |= MAKE_64BIT_MASK(len, 64 - len);
157
ctx->s_mask = s_mask;
158
159
- if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
160
+ if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
161
return true;
162
}
163
164
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
165
ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
166
167
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
168
- ctx->s_mask = smask_from_smask(s_mask);
169
170
return fold_masks(ctx, op);
171
}
1232
--
172
--
1233
2.25.1
173
2.43.0
1234
1235
1
There is only one caller, and shortly we will need access
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
to the MachineState, which tcg_init_machine already has.
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
3
---
8
accel/tcg/internal.h | 2 ++
4
tcg/optimize.c | 9 +++++----
9
include/sysemu/tcg.h | 2 --
5
1 file changed, 5 insertions(+), 4 deletions(-)
10
accel/tcg/tcg-all.c | 16 +++++++++++++++-
11
accel/tcg/translate-all.c | 21 ++-------------------
12
bsd-user/main.c | 2 +-
13
5 files changed, 20 insertions(+), 23 deletions(-)
14
6
15
diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
16
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/tcg/internal.h
9
--- a/tcg/optimize.c
18
+++ b/accel/tcg/internal.h
10
+++ b/tcg/optimize.c
19
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
11
@@ -XXX,XX +XXX,XX @@ static void finish_ebb(OptContext *ctx)
20
int cflags);
12
remove_mem_copy_all(ctx);
21
13
}
22
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
14
23
+void page_init(void);
15
-static void finish_folding(OptContext *ctx, TCGOp *op)
24
+void tb_htable_init(void);
16
+static bool finish_folding(OptContext *ctx, TCGOp *op)
25
26
#endif /* ACCEL_TCG_INTERNAL_H */
27
diff --git a/include/sysemu/tcg.h b/include/sysemu/tcg.h
28
index XXXXXXX..XXXXXXX 100644
29
--- a/include/sysemu/tcg.h
30
+++ b/include/sysemu/tcg.h
31
@@ -XXX,XX +XXX,XX @@
32
#ifndef SYSEMU_TCG_H
33
#define SYSEMU_TCG_H
34
35
-void tcg_exec_init(unsigned long tb_size, int splitwx);
36
-
37
#ifdef CONFIG_TCG
38
extern bool tcg_allowed;
39
#define tcg_enabled() (tcg_allowed)
40
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/accel/tcg/tcg-all.c
43
+++ b/accel/tcg/tcg-all.c
44
@@ -XXX,XX +XXX,XX @@
45
#include "qemu/error-report.h"
46
#include "qemu/accel.h"
47
#include "qapi/qapi-builtin-visit.h"
48
+#include "internal.h"
49
50
struct TCGState {
51
AccelState parent_obj;
52
@@ -XXX,XX +XXX,XX @@ static int tcg_init_machine(MachineState *ms)
53
{
17
{
54
TCGState *s = TCG_STATE(current_accel());
18
const TCGOpDef *def = &tcg_op_defs[op->opc];
55
19
int i, nb_oargs;
56
- tcg_exec_init(s->tb_size * 1024 * 1024, s->splitwx_enabled);
20
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
57
+ tcg_allowed = true;
21
ts_info(ts)->z_mask = ctx->z_mask;
58
mttcg_enabled = s->mttcg_enabled;
22
}
59
+
23
}
60
+ page_init();
24
+ return true;
61
+ tb_htable_init();
62
+ tcg_init(s->tb_size * 1024 * 1024, s->splitwx_enabled);
63
+
64
+#if defined(CONFIG_SOFTMMU)
65
+ /*
66
+ * There's no guest base to take into account, so go ahead and
67
+ * initialize the prologue now.
68
+ */
69
+ tcg_prologue_init(tcg_ctx);
70
+#endif
71
+
72
return 0;
73
}
25
}
74
26
75
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
27
/*
76
index XXXXXXX..XXXXXXX 100644
28
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
77
--- a/accel/tcg/translate-all.c
29
fold_xi_to_x(ctx, op, 0)) {
78
+++ b/accel/tcg/translate-all.c
30
return true;
79
@@ -XXX,XX +XXX,XX @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
31
}
80
return false;
32
- return false;
33
+ return finish_folding(ctx, op);
81
}
34
}
82
35
83
-static void page_init(void)
36
/* We cannot as yet do_constant_folding with vectors. */
84
+void page_init(void)
37
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
85
{
38
fold_xi_to_x(ctx, op, 0)) {
86
page_size_init();
39
return true;
87
page_table_config_init();
40
}
88
@@ -XXX,XX +XXX,XX @@ static bool tb_cmp(const void *ap, const void *bp)
41
- return false;
89
a->page_addr[1] == b->page_addr[1];
42
+ return finish_folding(ctx, op);
90
}
43
}
91
44
92
-static void tb_htable_init(void)
45
static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
93
+void tb_htable_init(void)
46
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
94
{
47
op->args[4] = arg_new_constant(ctx, bl);
95
unsigned int mode = QHT_MODE_AUTO_RESIZE;
48
op->args[5] = arg_new_constant(ctx, bh);
96
49
}
97
qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
50
- return false;
51
+ return finish_folding(ctx, op);
98
}
52
}
99
53
100
-/* Must be called before using the QEMU cpus. 'tb_size' is the size
54
static bool fold_add2(OptContext *ctx, TCGOp *op)
101
- (in bytes) allocated to the translation buffer. Zero means default
102
- size. */
103
-void tcg_exec_init(unsigned long tb_size, int splitwx)
104
-{
105
- tcg_allowed = true;
106
- page_init();
107
- tb_htable_init();
108
- tcg_init(tb_size, splitwx);
109
-
110
-#if defined(CONFIG_SOFTMMU)
111
- /* There's no guest base to take into account, so go ahead and
112
- initialize the prologue now. */
113
- tcg_prologue_init(tcg_ctx);
114
-#endif
115
-}
116
-
117
/* call with @p->lock held */
118
static inline void invalidate_page_bitmap(PageDesc *p)
119
{
120
diff --git a/bsd-user/main.c b/bsd-user/main.c
121
index XXXXXXX..XXXXXXX 100644
122
--- a/bsd-user/main.c
123
+++ b/bsd-user/main.c
124
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
125
envlist_free(envlist);
126
127
/*
128
- * Now that page sizes are configured in tcg_exec_init() we can do
129
+ * Now that page sizes are configured we can do
130
* proper page alignment for guest_base.
131
*/
132
guest_base = HOST_PAGE_ALIGN(guest_base);
133
--
55
--
134
2.25.1
56
2.43.0
135
136
1
Introduce a function to remove everything emitted
1
Introduce ti_is_const, ti_const_val, ti_is_const_val.
2
since a given point.
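A minimal usage sketch (illustrative condition name, not from this patch):

    TCGOp *last = tcg_last_op();
    /* ... speculatively emit opcodes ... */
    if (!keep_speculative_code) {
        /* Roll the op stream back to where we started. */
        tcg_remove_ops_after(last);
    }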
3
2
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
4
---
7
include/tcg/tcg.h | 10 ++++++++++
5
tcg/optimize.c | 20 +++++++++++++++++---
8
tcg/tcg.c | 13 +++++++++++++
6
1 file changed, 17 insertions(+), 3 deletions(-)
9
2 files changed, 23 insertions(+)
10
7
11
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
13
--- a/include/tcg/tcg.h
10
--- a/tcg/optimize.c
14
+++ b/include/tcg/tcg.h
11
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ void tcg_op_remove(TCGContext *s, TCGOp *op);
12
@@ -XXX,XX +XXX,XX @@ static inline TempOptInfo *arg_info(TCGArg arg)
16
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc);
13
return ts_info(arg_temp(arg));
17
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc);
18
19
+/**
20
+ * tcg_remove_ops_after:
21
+ * @op: target operation
22
+ *
23
+ * Discard any opcodes emitted since @op. Expected usage is to save
24
+ * a starting point with tcg_last_op(), speculatively emit opcodes,
25
+ * then decide whether or not to keep those opcodes after the fact.
26
+ */
27
+void tcg_remove_ops_after(TCGOp *op);
28
+
29
void tcg_optimize(TCGContext *s);
30
31
/* Allocate a new temporary and initialize it with a constant. */
32
diff --git a/tcg/tcg.c b/tcg/tcg.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/tcg/tcg.c
35
+++ b/tcg/tcg.c
36
@@ -XXX,XX +XXX,XX @@ void tcg_op_remove(TCGContext *s, TCGOp *op)
37
#endif
38
}
14
}
39
15
40
+void tcg_remove_ops_after(TCGOp *op)
16
+static inline bool ti_is_const(TempOptInfo *ti)
41
+{
17
+{
42
+ TCGContext *s = tcg_ctx;
18
+ return ti->is_const;
43
+
44
+ while (true) {
45
+ TCGOp *last = tcg_last_op();
46
+ if (last == op) {
47
+ return;
48
+ }
49
+ tcg_op_remove(s, last);
50
+ }
51
+}
19
+}
52
+
20
+
53
static TCGOp *tcg_op_alloc(TCGOpcode opc)
21
+static inline uint64_t ti_const_val(TempOptInfo *ti)
22
+{
23
+ return ti->val;
24
+}
25
+
26
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
27
+{
28
+ return ti_is_const(ti) && ti_const_val(ti) == val;
29
+}
30
+
31
static inline bool ts_is_const(TCGTemp *ts)
54
{
32
{
55
TCGContext *s = tcg_ctx;
33
- return ts_info(ts)->is_const;
34
+ return ti_is_const(ts_info(ts));
35
}
36
37
static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
38
{
39
- TempOptInfo *ti = ts_info(ts);
40
- return ti->is_const && ti->val == val;
41
+ return ti_is_const_val(ts_info(ts), val);
42
}
43
44
static inline bool arg_is_const(TCGArg arg)
56
--
45
--
57
2.25.1
46
2.43.0
58
59
1
Instead of delaying tcg_region_init until after tcg_prologue_init
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
is complete, do tcg_region_init first and let tcg_prologue_init
2
Sink mask computation below fold_affected_mask early exit.
3
shrink the first region by the size of the generated prologue.
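Ordering sketch (editor's paraphrase of the commit message, using the names
touched by this patch):

    alloc_code_gen_buffer(...);
    tcg_region_init();              /* now done up front */
    ...
    tcg_prologue_init(tcg_ctx);     /* emits the prologue into region 0, then
                                       recomputes region 0 to exclude it */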
4
3
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
6
---
9
accel/tcg/tcg-all.c | 11 ---------
7
tcg/optimize.c | 30 ++++++++++++++++--------------
10
accel/tcg/translate-all.c | 3 +++
8
1 file changed, 16 insertions(+), 14 deletions(-)
11
bsd-user/main.c | 1 -
12
linux-user/main.c | 1 -
13
tcg/tcg.c | 52 ++++++++++++++-------------------------
14
5 files changed, 22 insertions(+), 46 deletions(-)
15
9
16
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
18
--- a/accel/tcg/tcg-all.c
12
--- a/tcg/optimize.c
19
+++ b/accel/tcg/tcg-all.c
13
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ static int tcg_init(MachineState *ms)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_add2(OptContext *ctx, TCGOp *op)
21
15
22
tcg_exec_init(s->tb_size * 1024 * 1024, s->splitwx_enabled);
16
static bool fold_and(OptContext *ctx, TCGOp *op)
23
mttcg_enabled = s->mttcg_enabled;
17
{
18
- uint64_t z1, z2;
19
+ uint64_t z1, z2, z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2_commutative(ctx, op) ||
23
fold_xi_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
- z2 = arg_info(op->args[2])->z_mask;
30
- ctx->z_mask = z1 & z2;
24
-
31
-
25
- /*
32
- /*
26
- * Initialize TCG regions only for softmmu.
33
- * Sign repetitions are perforce all identical, whether they are 1 or 0.
27
- *
34
- * Bitwise operations preserve the relative quantity of the repetitions.
28
- * This needs to be done later for user mode, because the prologue
29
- * generation needs to be delayed so that GUEST_BASE is already set.
30
- */
35
- */
31
-#ifndef CONFIG_USER_ONLY
36
- ctx->s_mask = arg_info(op->args[1])->s_mask
32
- tcg_region_init();
37
- & arg_info(op->args[2])->s_mask;
33
-#endif /* !CONFIG_USER_ONLY */
38
+ t1 = arg_info(op->args[1]);
34
-
39
+ t2 = arg_info(op->args[2]);
35
return 0;
40
+ z1 = t1->z_mask;
41
+ z2 = t2->z_mask;
42
43
/*
44
* Known-zeros does not imply known-ones. Therefore unless
45
* arg2 is constant, we can't infer affected bits from it.
46
*/
47
- if (arg_is_const(op->args[2]) &&
48
- fold_affected_mask(ctx, op, z1 & ~z2)) {
49
+ if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
50
return true;
51
}
52
53
- return fold_masks(ctx, op);
54
+ z_mask = z1 & z2;
55
+
56
+ /*
57
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
58
+ * Bitwise operations preserve the relative quantity of the repetitions.
59
+ */
60
+ s_mask = t1->s_mask & t2->s_mask;
61
+
62
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
36
}
63
}
37
64
38
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
65
static bool fold_andc(OptContext *ctx, TCGOp *op)
39
index XXXXXXX..XXXXXXX 100644
40
--- a/accel/tcg/translate-all.c
41
+++ b/accel/tcg/translate-all.c
42
@@ -XXX,XX +XXX,XX @@ void tcg_exec_init(unsigned long tb_size, int splitwx)
43
splitwx, &error_fatal);
44
assert(ok);
45
46
+ /* TODO: allocating regions is hand-in-glove with code_gen_buffer. */
47
+ tcg_region_init();
48
+
49
#if defined(CONFIG_SOFTMMU)
50
/* There's no guest base to take into account, so go ahead and
51
initialize the prologue now. */
52
diff --git a/bsd-user/main.c b/bsd-user/main.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/bsd-user/main.c
55
+++ b/bsd-user/main.c
56
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
57
* the real value of GUEST_BASE into account.
58
*/
59
tcg_prologue_init(tcg_ctx);
60
- tcg_region_init();
61
62
/* build Task State */
63
memset(ts, 0, sizeof(TaskState));
64
diff --git a/linux-user/main.c b/linux-user/main.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/linux-user/main.c
67
+++ b/linux-user/main.c
68
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
69
generating the prologue until now so that the prologue can take
70
the real value of GUEST_BASE into account. */
71
tcg_prologue_init(tcg_ctx);
72
- tcg_region_init();
73
74
target_cpu_copy_regs(env, regs);
75
76
diff --git a/tcg/tcg.c b/tcg/tcg.c
77
index XXXXXXX..XXXXXXX 100644
78
--- a/tcg/tcg.c
79
+++ b/tcg/tcg.c
80
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tcg_tb_alloc(TCGContext *s)
81
82
void tcg_prologue_init(TCGContext *s)
83
{
84
- size_t prologue_size, total_size;
85
- void *buf0, *buf1;
86
+ size_t prologue_size;
87
88
/* Put the prologue at the beginning of code_gen_buffer. */
89
- buf0 = s->code_gen_buffer;
90
- total_size = s->code_gen_buffer_size;
91
- s->code_ptr = buf0;
92
- s->code_buf = buf0;
93
+ tcg_region_assign(s, 0);
94
+ s->code_ptr = s->code_gen_ptr;
95
+ s->code_buf = s->code_gen_ptr;
96
s->data_gen_ptr = NULL;
97
98
- /*
99
- * The region trees are not yet configured, but tcg_splitwx_to_rx
100
- * needs the bounds for an assert.
101
- */
102
- region.start = buf0;
103
- region.end = buf0 + total_size;
104
-
105
#ifndef CONFIG_TCG_INTERPRETER
106
- tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(buf0);
107
+ tcg_qemu_tb_exec = (tcg_prologue_fn *)tcg_splitwx_to_rx(s->code_ptr);
108
#endif
109
110
- /* Compute a high-water mark, at which we voluntarily flush the buffer
111
- and start over. The size here is arbitrary, significantly larger
112
- than we expect the code generation for any one opcode to require. */
113
- s->code_gen_highwater = s->code_gen_buffer + (total_size - TCG_HIGHWATER);
114
-
115
#ifdef TCG_TARGET_NEED_POOL_LABELS
116
s->pool_labels = NULL;
117
#endif
118
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
119
}
120
#endif
121
122
- buf1 = s->code_ptr;
123
+ prologue_size = tcg_current_code_size(s);
124
+
125
#ifndef CONFIG_TCG_INTERPRETER
126
- flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(buf0), (uintptr_t)buf0,
127
- tcg_ptr_byte_diff(buf1, buf0));
128
+ flush_idcache_range((uintptr_t)tcg_splitwx_to_rx(s->code_buf),
129
+ (uintptr_t)s->code_buf, prologue_size);
130
#endif
131
132
- /* Deduct the prologue from the buffer. */
133
- prologue_size = tcg_current_code_size(s);
134
- s->code_gen_ptr = buf1;
135
- s->code_gen_buffer = buf1;
136
- s->code_buf = buf1;
137
- total_size -= prologue_size;
138
- s->code_gen_buffer_size = total_size;
139
+ /* Deduct the prologue from the first region. */
140
+ region.start = s->code_ptr;
141
142
- tcg_register_jit(tcg_splitwx_to_rx(s->code_gen_buffer), total_size);
143
+ /* Recompute boundaries of the first region. */
144
+ tcg_region_assign(s, 0);
145
+
146
+ tcg_register_jit(tcg_splitwx_to_rx(region.start),
147
+ region.end - region.start);
148
149
#ifdef DEBUG_DISAS
150
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
151
FILE *logfile = qemu_log_lock();
152
qemu_log("PROLOGUE: [size=%zu]\n", prologue_size);
153
if (s->data_gen_ptr) {
154
- size_t code_size = s->data_gen_ptr - buf0;
155
+ size_t code_size = s->data_gen_ptr - s->code_gen_ptr;
156
size_t data_size = prologue_size - code_size;
157
size_t i;
158
159
- log_disas(buf0, code_size);
160
+ log_disas(s->code_gen_ptr, code_size);
161
162
for (i = 0; i < data_size; i += sizeof(tcg_target_ulong)) {
163
if (sizeof(tcg_target_ulong) == 8) {
164
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
165
}
166
}
167
} else {
168
- log_disas(buf0, prologue_size);
169
+ log_disas(s->code_gen_ptr, prologue_size);
170
}
171
qemu_log("\n");
172
qemu_log_flush();
173
--
66
--
174
2.25.1
67
2.43.0
175
176
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Avoid double inversion of the value of second const operand.
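Concretely (illustrative values, not from the patch): for andc with a constant
second operand v2, the only bits of op1 the operation can clear are z_mask & v2
(if that is zero the op is a no-op), and the known-zero mask of the result is
z_mask & ~v2. For example, z_mask == 0xff and v2 == 0x0f give an affected mask
of 0x0f and a result z_mask of 0xf0.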
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 21 +++++++++++----------
8
1 file changed, 11 insertions(+), 10 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
15
16
static bool fold_andc(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1;
19
+ uint64_t z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2(ctx, op) ||
23
fold_xx_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ t2 = arg_info(op->args[2]);
31
+ z_mask = t1->z_mask;
32
33
/*
34
* Known-zeros does not imply known-ones. Therefore unless
35
* arg2 is constant, we can't infer anything from it.
36
*/
37
- if (arg_is_const(op->args[2])) {
38
- uint64_t z2 = ~arg_info(op->args[2])->z_mask;
39
- if (fold_affected_mask(ctx, op, z1 & ~z2)) {
40
+ if (ti_is_const(t2)) {
41
+ uint64_t v2 = ti_const_val(t2);
42
+ if (fold_affected_mask(ctx, op, z_mask & v2)) {
43
return true;
44
}
45
- z1 &= z2;
46
+ z_mask &= ~v2;
47
}
48
- ctx->z_mask = z1;
49
50
- ctx->s_mask = arg_info(op->args[1])->s_mask
51
- & arg_info(op->args[2])->s_mask;
52
- return fold_masks(ctx, op);
53
+ s_mask = t1->s_mask & t2->s_mask;
54
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
55
}
56
57
static bool fold_brcond(OptContext *ctx, TCGOp *op)
58
--
59
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Always set s_mask along the BSWAP_OS path, since the result is
3
being explicitly sign-extended.
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 21 ++++++++++-----------
9
1 file changed, 10 insertions(+), 11 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
16
static bool fold_bswap(OptContext *ctx, TCGOp *op)
17
{
18
uint64_t z_mask, s_mask, sign;
19
+ TempOptInfo *t1 = arg_info(op->args[1]);
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t = arg_info(op->args[1])->val;
23
-
24
- t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
25
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
26
+ if (ti_is_const(t1)) {
27
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
28
+ do_constant_folding(op->opc, ctx->type,
29
+ ti_const_val(t1),
30
+ op->args[2]));
31
}
32
33
- z_mask = arg_info(op->args[1])->z_mask;
34
-
35
+ z_mask = t1->z_mask;
36
switch (op->opc) {
37
case INDEX_op_bswap16_i32:
38
case INDEX_op_bswap16_i64:
39
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
40
/* If the sign bit may be 1, force all the bits above to 1. */
41
if (z_mask & sign) {
42
z_mask |= sign;
43
- s_mask = sign << 1;
44
}
45
+ /* The value and therefore s_mask is explicitly sign-extended. */
46
+ s_mask = sign;
47
break;
48
default:
49
/* The high bits are undefined: force all bits above the sign to 1. */
50
z_mask |= sign << 1;
51
break;
52
}
53
- ctx->z_mask = z_mask;
54
- ctx->s_mask = s_mask;
55
56
- return fold_masks(ctx, op);
57
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
58
}
59
60
static bool fold_call(OptContext *ctx, TCGOp *op)
61
--
62
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Compute s_mask from the union of the maximum count and the
3
op2 fallback for op1 being zero.
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 15 ++++++++++-----
9
1 file changed, 10 insertions(+), 5 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
16
17
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t z_mask;
20
+ uint64_t z_mask, s_mask;
21
+ TempOptInfo *t1 = arg_info(op->args[1]);
22
+ TempOptInfo *t2 = arg_info(op->args[2]);
23
24
- if (arg_is_const(op->args[1])) {
25
- uint64_t t = arg_info(op->args[1])->val;
26
+ if (ti_is_const(t1)) {
27
+ uint64_t t = ti_const_val(t1);
28
29
if (t != 0) {
30
t = do_constant_folding(op->opc, ctx->type, t, 0);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
32
default:
33
g_assert_not_reached();
34
}
35
- ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
36
- return false;
37
+ s_mask = ~z_mask;
38
+ z_mask |= t2->z_mask;
39
+ s_mask &= t2->s_mask;
40
+
41
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
42
}
43
44
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
diff view generated by jsdifflib
1
Return output buffer and size via output pointer arguments,
1
Add fold_masks_z as a trivial wrapper around fold_masks_zs.
2
rather than returning size via tcg_ctx->code_gen_buffer_size.
2
Avoid the use of the OptContext slots.
3
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/region.c | 19 +++++++++----------
7
tcg/optimize.c | 13 ++++++++++---
9
1 file changed, 9 insertions(+), 10 deletions(-)
8
1 file changed, 10 insertions(+), 3 deletions(-)
10
9
11
diff --git a/tcg/region.c b/tcg/region.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/region.c
12
--- a/tcg/optimize.c
14
+++ b/tcg/region.c
13
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static inline bool cross_256mb(void *addr, size_t size)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
16
/*
17
* We weren't able to allocate a buffer without crossing that boundary,
18
* so make do with the larger portion of the buffer that doesn't cross.
19
- * Returns the new base of the buffer, and adjusts code_gen_buffer_size.
20
+ * Returns the new base and size of the buffer in *obuf and *osize.
21
*/
22
-static inline void *split_cross_256mb(void *buf1, size_t size1)
23
+static inline void split_cross_256mb(void **obuf, size_t *osize,
24
+ void *buf1, size_t size1)
25
{
26
void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
27
size_t size2 = buf1 + size1 - buf2;
28
@@ -XXX,XX +XXX,XX @@ static inline void *split_cross_256mb(void *buf1, size_t size1)
29
buf1 = buf2;
30
}
31
32
- tcg_ctx->code_gen_buffer_size = size1;
33
- return buf1;
34
+ *obuf = buf1;
35
+ *osize = size1;
36
}
37
#endif
38
39
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
40
if (size > tb_size) {
41
size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
42
}
43
- tcg_ctx->code_gen_buffer_size = size;
44
45
#ifdef __mips__
46
if (cross_256mb(buf, size)) {
47
- buf = split_cross_256mb(buf, size);
48
- size = tcg_ctx->code_gen_buffer_size;
49
+ split_cross_256mb(&buf, &size, buf, size);
50
}
51
#endif
52
53
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
54
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
55
56
tcg_ctx->code_gen_buffer = buf;
57
+ tcg_ctx->code_gen_buffer_size = size;
58
return true;
15
return true;
59
}
16
}
60
#elif defined(_WIN32)
17
61
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_anon(size_t size, int prot,
18
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
62
"allocate %zu bytes for jit buffer", size);
19
+{
63
return false;
20
+ return fold_masks_zs(ctx, op, z_mask, 0);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
27
28
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask;
31
+
32
if (fold_const1(ctx, op)) {
33
return true;
64
}
34
}
65
- tcg_ctx->code_gen_buffer_size = size;
35
66
36
switch (ctx->type) {
67
#ifdef __mips__
37
case TCG_TYPE_I32:
68
if (cross_256mb(buf, size)) {
38
- ctx->z_mask = 32 | 31;
69
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_anon(size_t size, int prot,
39
+ z_mask = 32 | 31;
70
/* fallthru */
40
break;
71
default:
41
case TCG_TYPE_I64:
72
/* Split the original buffer. Free the smaller half. */
42
- ctx->z_mask = 64 | 63;
73
- buf2 = split_cross_256mb(buf, size);
43
+ z_mask = 64 | 63;
74
- size2 = tcg_ctx->code_gen_buffer_size;
44
break;
75
+ split_cross_256mb(&buf2, &size2, buf, size);
45
default:
76
if (buf == buf2) {
46
g_assert_not_reached();
77
munmap(buf + size2, size - size2);
47
}
78
} else {
48
- return false;
79
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_anon(size_t size, int prot,
49
+ return fold_masks_z(ctx, op, z_mask);
80
qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
81
82
tcg_ctx->code_gen_buffer = buf;
83
+ tcg_ctx->code_gen_buffer_size = size;
84
return true;
85
}
50
}
86
51
52
static bool fold_deposit(OptContext *ctx, TCGOp *op)
87
--
53
--
88
2.25.1
54
2.43.0
89
90
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
When we fold to and, use fold_and.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 35 +++++++++++++++++------------------
8
1 file changed, 17 insertions(+), 18 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
15
16
static bool fold_deposit(OptContext *ctx, TCGOp *op)
17
{
18
+ TempOptInfo *t1 = arg_info(op->args[1]);
19
+ TempOptInfo *t2 = arg_info(op->args[2]);
20
+ int ofs = op->args[3];
21
+ int len = op->args[4];
22
TCGOpcode and_opc;
23
+ uint64_t z_mask;
24
25
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
26
- uint64_t t1 = arg_info(op->args[1])->val;
27
- uint64_t t2 = arg_info(op->args[2])->val;
28
-
29
- t1 = deposit64(t1, op->args[3], op->args[4], t2);
30
- return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
31
+ if (ti_is_const(t1) && ti_is_const(t2)) {
32
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
33
+ deposit64(ti_const_val(t1), ofs, len,
34
+ ti_const_val(t2)));
35
}
36
37
switch (ctx->type) {
38
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
39
}
40
41
/* Inserting a value into zero at offset 0. */
42
- if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
43
- uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
44
+ if (ti_is_const_val(t1, 0) && ofs == 0) {
45
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
46
47
op->opc = and_opc;
48
op->args[1] = op->args[2];
49
op->args[2] = arg_new_constant(ctx, mask);
50
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
51
- return false;
52
+ return fold_and(ctx, op);
53
}
54
55
/* Inserting zero into a value. */
56
- if (arg_is_const_val(op->args[2], 0)) {
57
- uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
58
+ if (ti_is_const_val(t2, 0)) {
59
+ uint64_t mask = deposit64(-1, ofs, len, 0);
60
61
op->opc = and_opc;
62
op->args[2] = arg_new_constant(ctx, mask);
63
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
64
- return false;
65
+ return fold_and(ctx, op);
66
}
67
68
- ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
69
- op->args[3], op->args[4],
70
- arg_info(op->args[2])->z_mask);
71
- return false;
72
+ z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
73
+ return fold_masks_z(ctx, op, z_mask);
74
}
75
76
static bool fold_divide(OptContext *ctx, TCGOp *op)
77
--
78
2.43.0
diff view generated by jsdifflib
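Not from the patch: a minimal standalone model in plain C (the deposit64 here is a local reimplementation, not the QEMU header) of the rule the rewritten fold_deposit relies on: the known-zero mask of a deposit is the deposit of the two known-zero masks, and constant operands fold through the same helper.

#include <assert.h>
#include <stdint.h>

/* Insert 'len' low bits of 'val' into 'base' at offset 'ofs'. */
static uint64_t deposit64(uint64_t base, int ofs, int len, uint64_t val)
{
    uint64_t mask = (len == 64 ? ~0ull : (1ull << len) - 1) << ofs;
    return (base & ~mask) | ((val << ofs) & mask);
}

int main(void)
{
    /* Operand z_masks: bits that might be nonzero in each input. */
    uint64_t z1 = 0x00ff, z2 = 0x0f;
    /* z_mask of deposit(t1, 8, 4, t2): the field comes from t2, the rest from t1. */
    uint64_t z_out = deposit64(z1, 8, 4, z2);
    assert(z_out == 0x0fff);

    /* Constant folding uses the same helper on the values themselves. */
    assert(deposit64(0x1200, 8, 4, 0x5) == 0x1500);
    return 0;
}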
1
Move the call out of the N versions of alloc_code_gen_buffer
1
The input which overlaps the sign bit of the output can
2
and into tcg_region_init.
2
have its input s_mask propagated to the output s_mask.
3
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/region.c | 14 +++++++-------
7
tcg/optimize.c | 14 ++++++++++++--
9
1 file changed, 7 insertions(+), 7 deletions(-)
8
1 file changed, 12 insertions(+), 2 deletions(-)
10
9
11
diff --git a/tcg/region.c b/tcg/region.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/region.c
12
--- a/tcg/optimize.c
14
+++ b/tcg/region.c
13
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
16
error_setg_errno(errp, errno, "mprotect of jit buffer");
15
TempOptInfo *t2 = arg_info(op->args[2]);
17
return false;
16
int ofs = op->args[3];
17
int len = op->args[4];
18
+ int width;
19
TCGOpcode and_opc;
20
- uint64_t z_mask;
21
+ uint64_t z_mask, s_mask;
22
23
if (ti_is_const(t1) && ti_is_const(t2)) {
24
return tcg_opt_gen_movi(ctx, op, op->args[0],
25
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
26
switch (ctx->type) {
27
case TCG_TYPE_I32:
28
and_opc = INDEX_op_and_i32;
29
+ width = 32;
30
break;
31
case TCG_TYPE_I64:
32
and_opc = INDEX_op_and_i64;
33
+ width = 64;
34
break;
35
default:
36
g_assert_not_reached();
37
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
38
return fold_and(ctx, op);
18
}
39
}
19
- qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
40
20
41
+ /* The s_mask from the top portion of the deposit is still valid. */
21
region.start_aligned = buf;
42
+ if (ofs + len == width) {
22
region.total_size = size;
43
+ s_mask = t2->s_mask << ofs;
23
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer_anon(size_t size, int prot,
44
+ } else {
24
}
45
+ s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
25
#endif
26
27
- /* Request large pages for the buffer. */
28
- qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
29
-
30
region.start_aligned = buf;
31
region.total_size = size;
32
return prot;
33
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
34
region.total_size = size;
35
tcg_splitwx_diff = buf_rx - buf_rw;
36
37
- /* Request large pages for the buffer and the splitwx. */
38
- qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
39
- qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
40
return PROT_READ | PROT_WRITE;
41
42
fail_rx:
43
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
44
splitwx, &error_fatal);
45
assert(have_prot >= 0);
46
47
+ /* Request large pages for the buffer and the splitwx. */
48
+ qemu_madvise(region.start_aligned, region.total_size, QEMU_MADV_HUGEPAGE);
49
+ if (tcg_splitwx_diff) {
50
+ qemu_madvise(region.start_aligned + tcg_splitwx_diff,
51
+ region.total_size, QEMU_MADV_HUGEPAGE);
52
+ }
46
+ }
53
+
47
+
54
/*
48
z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
55
* Make region_size a multiple of page_size, using aligned as the start.
49
- return fold_masks_z(ctx, op, z_mask);
56
* As a result of this we might end up with a few extra pages at the end of
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_divide(OptContext *ctx, TCGOp *op)
57
--
54
--
58
2.25.1
55
2.43.0
59
60
diff view generated by jsdifflib
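Not from the patch: a small standalone check in plain C of the sign-propagation rule described above. When the deposited field reaches the top bit of the output, the second operand supplies the output's sign repetitions, shifted by ofs; otherwise the first operand does, minus the bits covered by the field. The deposit64 below is a local stand-in for the QEMU helper.

#include <assert.h>
#include <stdint.h>

static uint64_t deposit64(uint64_t base, int ofs, int len, uint64_t val)
{
    uint64_t mask = (len == 64 ? ~0ull : (1ull << len) - 1) << ofs;
    return (base & ~mask) | ((val << ofs) & mask);
}

int main(void)
{
    int64_t t1 = 0x12345678;      /* arbitrary base value */
    int64_t t2 = (int8_t)0xf5;    /* sign-extended: bits 7..63 copy the sign */

    /* ofs + len == width: the field covers the output sign bit, so the
     * output's sign repetitions are exactly t2's, shifted left by ofs. */
    int64_t r = (int64_t)deposit64(t1, 56, 8, t2);
    assert((r >> 56) == (int64_t)(int8_t)0xf5);

    /* ofs + len < width: the top bits still come from t1, so only t1's
     * sign information above the field survives. */
    int64_t r2 = (int64_t)deposit64(t1, 0, 8, t2);
    assert((r2 >> 8) == (t1 >> 8));
    return 0;
}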
New patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 1)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_dup(OptContext *ctx, TCGOp *op)
--
2.43.0
diff view generated by jsdifflib
New patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
         t = dup_const(TCGOP_VECE(op), t);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_dup2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
         op->opc = INDEX_op_dup_vec;
         TCGOP_VECE(op) = MO_32;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
--
2.43.0
diff view generated by jsdifflib
1
This has only one user, and currently needs an ifdef,
1
Add fold_masks_s as a trivial wrapper around fold_masks_zs.
2
but will make more sense after some code motion.
2
Avoid the use of the OptContext slots.
3
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/tcg.c | 13 ++++++++++---
7
tcg/optimize.c | 13 ++++++++++---
9
1 file changed, 10 insertions(+), 3 deletions(-)
8
1 file changed, 10 insertions(+), 3 deletions(-)
10
9
11
diff --git a/tcg/tcg.c b/tcg/tcg.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/tcg.c
12
--- a/tcg/optimize.c
14
+++ b/tcg/tcg.c
13
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static void tcg_region_initial_alloc__locked(TCGContext *s)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
16
g_assert(!err);
15
return fold_masks_zs(ctx, op, z_mask, 0);
17
}
16
}
18
17
19
+#ifndef CONFIG_USER_ONLY
18
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
20
+static void tcg_region_initial_alloc(TCGContext *s)
21
+{
19
+{
22
+ qemu_mutex_lock(&region.lock);
20
+ return fold_masks_zs(ctx, op, -1, s_mask);
23
+ tcg_region_initial_alloc__locked(s);
24
+ qemu_mutex_unlock(&region.lock);
25
+}
21
+}
26
+#endif
27
+
22
+
28
/* Call from a safe-work context */
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
29
void tcg_region_reset_all(void)
30
{
24
{
31
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void)
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t s_mask;
31
+
32
if (fold_const2_commutative(ctx, op) ||
33
fold_xi_to_x(ctx, op, -1) ||
34
fold_xi_to_not(ctx, op, 0)) {
35
return true;
32
}
36
}
33
37
34
tcg_ctx = s;
38
- ctx->s_mask = arg_info(op->args[1])->s_mask
35
- qemu_mutex_lock(&region.lock);
39
- & arg_info(op->args[2])->s_mask;
36
- tcg_region_initial_alloc__locked(s);
40
- return false;
37
- qemu_mutex_unlock(&region.lock);
41
+ s_mask = arg_info(op->args[1])->s_mask
38
+ tcg_region_initial_alloc(s);
42
+ & arg_info(op->args[2])->s_mask;
43
+ return fold_masks_s(ctx, op, s_mask);
39
}
44
}
40
#endif /* !CONFIG_USER_ONLY */
45
41
46
static bool fold_extract(OptContext *ctx, TCGOp *op)
42
--
47
--
43
2.25.1
48
2.43.0
44
45
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 15 ++++++---------
7
1 file changed, 6 insertions(+), 9 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
14
static bool fold_extract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask_old, z_mask;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = extract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ extract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask_old = arg_info(op->args[1])->z_mask;
33
+ z_mask_old = t1->z_mask;
34
z_mask = extract64(z_mask_old, pos, len);
35
if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
36
return true;
37
}
38
- ctx->z_mask = z_mask;
39
40
- return fold_masks(ctx, op);
41
+ return fold_masks_z(ctx, op, z_mask);
42
}
43
44
static bool fold_extract2(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
diff view generated by jsdifflib
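Not from the patch: a standalone check in plain C that extracting a field from a value stays within the same field extracted from its known-zero mask, which is why fold_extract can take z_mask = extract64(z_mask_old, pos, len). The extract64 below is a local stand-in for the QEMU helper.

#include <assert.h>
#include <stdint.h>

static uint64_t extract64(uint64_t value, int start, int length)
{
    return (value >> start) & (length == 64 ? ~0ull : (1ull << length) - 1);
}

int main(void)
{
    uint64_t z_mask_old = 0x0000ff00;   /* only bits 8..15 may be nonzero */
    uint64_t z_mask = extract64(z_mask_old, 8, 8);
    assert(z_mask == 0xff);

    /* Any concrete value respecting z_mask_old stays within z_mask. */
    for (uint64_t v = 0; v < 0x100; v++) {
        uint64_t val = v << 8;                      /* fits z_mask_old */
        assert((extract64(val, 8, 8) & ~z_mask) == 0);
    }
    return 0;
}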
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
12
}
13
return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_exts(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Explicitly sign-extend z_mask instead of doing that manually.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 29 ++++++++++++-----------------
8
1 file changed, 12 insertions(+), 17 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
15
16
static bool fold_exts(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t s_mask_old, s_mask, z_mask, sign;
19
+ uint64_t s_mask_old, s_mask, z_mask;
20
bool type_change = false;
21
+ TempOptInfo *t1;
22
23
if (fold_const1(ctx, op)) {
24
return true;
25
}
26
27
- z_mask = arg_info(op->args[1])->z_mask;
28
- s_mask = arg_info(op->args[1])->s_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ z_mask = t1->z_mask;
31
+ s_mask = t1->s_mask;
32
s_mask_old = s_mask;
33
34
switch (op->opc) {
35
CASE_OP_32_64(ext8s):
36
- sign = INT8_MIN;
37
- z_mask = (uint8_t)z_mask;
38
+ s_mask |= INT8_MIN;
39
+ z_mask = (int8_t)z_mask;
40
break;
41
CASE_OP_32_64(ext16s):
42
- sign = INT16_MIN;
43
- z_mask = (uint16_t)z_mask;
44
+ s_mask |= INT16_MIN;
45
+ z_mask = (int16_t)z_mask;
46
break;
47
case INDEX_op_ext_i32_i64:
48
type_change = true;
49
QEMU_FALLTHROUGH;
50
case INDEX_op_ext32s_i64:
51
- sign = INT32_MIN;
52
- z_mask = (uint32_t)z_mask;
53
+ s_mask |= INT32_MIN;
54
+ z_mask = (int32_t)z_mask;
55
break;
56
default:
57
g_assert_not_reached();
58
}
59
60
- if (z_mask & sign) {
61
- z_mask |= sign;
62
- }
63
- s_mask |= sign << 1;
64
-
65
- ctx->z_mask = z_mask;
66
- ctx->s_mask = s_mask;
67
if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
68
return true;
69
}
70
71
- return fold_masks(ctx, op);
72
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
73
}
74
75
static bool fold_extu(OptContext *ctx, TCGOp *op)
76
--
77
2.43.0
diff view generated by jsdifflib
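Not from the patch: a standalone comparison in plain C of the two z_mask computations for ext8s, a simplified model of the old manual sign-or against the new cast-based form, showing they agree. Like the patch, the cast form relies on the usual two's-complement conversion.

#include <assert.h>
#include <stdint.h>

/* Simplified model of the old manual form used in fold_exts for ext8s. */
static uint64_t ext8s_zmask_manual(uint64_t z_mask)
{
    uint64_t sign = INT8_MIN;          /* 0xffffffffffffff80 as uint64_t */
    z_mask = (uint8_t)z_mask;
    if (z_mask & sign) {
        z_mask |= sign;
    }
    return z_mask;
}

/* New form: let sign extension of the truncated value do the same job. */
static uint64_t ext8s_zmask_cast(uint64_t z_mask)
{
    return (uint64_t)(int64_t)(int8_t)z_mask;
}

int main(void)
{
    for (uint64_t z = 0; z < 0x200; z++) {
        assert(ext8s_zmask_manual(z) == ext8s_zmask_cast(z));
    }
    return 0;
}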
1
We shortly want to use tcg_init for something else.
1
Avoid the use of the OptContext slots.
2
Since the hook is called init_machine, match that.
3
2
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
5
---
9
accel/tcg/tcg-all.c | 4 ++--
6
tcg/optimize.c | 4 ++--
10
1 file changed, 2 insertions(+), 2 deletions(-)
7
1 file changed, 2 insertions(+), 2 deletions(-)
11
8
12
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
14
--- a/accel/tcg/tcg-all.c
11
--- a/tcg/optimize.c
15
+++ b/accel/tcg/tcg-all.c
12
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static void tcg_accel_instance_init(Object *obj)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
17
14
g_assert_not_reached();
18
bool mttcg_enabled;
15
}
19
16
20
-static int tcg_init(MachineState *ms)
17
- ctx->z_mask = z_mask;
21
+static int tcg_init_machine(MachineState *ms)
18
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
22
{
19
return true;
23
TCGState *s = TCG_STATE(current_accel());
20
}
24
21
- return fold_masks(ctx, op);
25
@@ -XXX,XX +XXX,XX @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
22
+
26
{
23
+ return fold_masks_z(ctx, op, z_mask);
27
AccelClass *ac = ACCEL_CLASS(oc);
24
}
28
ac->name = "tcg";
25
29
- ac->init_machine = tcg_init;
26
static bool fold_mb(OptContext *ctx, TCGOp *op)
30
+ ac->init_machine = tcg_init_machine;
31
ac->allowed = &tcg_allowed;
32
33
object_class_property_add_str(oc, "thread",
34
--
27
--
35
2.25.1
28
2.43.0
36
37
diff view generated by jsdifflib
1
There's a change in mprotect() behaviour [1] in the latest macOS
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
on M1 and it's not yet clear if it's going to be fixed by Apple.
3
2
4
In this case, instead of changing permissions of N guard pages,
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
we change permissions of N rwx regions. The same number of
6
syscalls are required either way.
7
8
[1] https://gist.github.com/hikalium/75ae822466ee4da13cbbe486498a191f
9
10
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
5
---
13
tcg/region.c | 19 +++++++++----------
6
tcg/optimize.c | 19 +++++++++++--------
14
1 file changed, 9 insertions(+), 10 deletions(-)
7
1 file changed, 11 insertions(+), 8 deletions(-)
15
8
16
diff --git a/tcg/region.c b/tcg/region.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/region.c
11
--- a/tcg/optimize.c
19
+++ b/tcg/region.c
12
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
21
error_free_or_abort(errp);
14
15
static bool fold_movcond(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *tt, *ft;
19
int i;
20
21
/* If true and false values are the same, eliminate the cmp. */
22
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
23
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
22
}
24
}
23
25
24
- prot = PROT_READ | PROT_WRITE | PROT_EXEC;
26
- ctx->z_mask = arg_info(op->args[3])->z_mask
25
+ /*
27
- | arg_info(op->args[4])->z_mask;
26
+ * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
28
- ctx->s_mask = arg_info(op->args[3])->s_mask
27
+ * rejects a permission change from RWX -> NONE when reserving the
29
- & arg_info(op->args[4])->s_mask;
28
+ * guard pages later. We can go the other way with the same number
30
+ tt = arg_info(op->args[3]);
29
+ * of syscalls, so always begin with PROT_NONE.
31
+ ft = arg_info(op->args[4]);
30
+ */
32
+ z_mask = tt->z_mask | ft->z_mask;
31
+ prot = PROT_NONE;
33
+ s_mask = tt->s_mask & ft->s_mask;
32
flags = MAP_PRIVATE | MAP_ANONYMOUS;
34
33
-#ifdef CONFIG_TCG_INTERPRETER
35
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
34
- /* The tcg interpreter does not need execute permission. */
36
- uint64_t tv = arg_info(op->args[3])->val;
35
- prot = PROT_READ | PROT_WRITE;
37
- uint64_t fv = arg_info(op->args[4])->val;
36
-#elif defined(CONFIG_DARWIN)
38
+ if (ti_is_const(tt) && ti_is_const(ft)) {
37
+#ifdef CONFIG_DARWIN
39
+ uint64_t tv = ti_const_val(tt);
38
/* Applicable to both iOS and macOS (Apple Silicon). */
40
+ uint64_t fv = ti_const_val(ft);
39
if (!splitwx) {
41
TCGOpcode opc, negopc = 0;
40
flags |= MAP_JIT;
42
TCGCond cond = op->args[5];
41
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
43
44
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
42
}
45
}
43
}
46
}
44
if (have_prot != 0) {
45
- /*
46
- * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
47
- * rejects a permission change from RWX -> NONE. Guard pages are
48
- * nice for bug detection but are not essential; ignore any failure.
49
- */
50
+ /* Guard pages are nice for bug detection but are not essential. */
51
(void)qemu_mprotect_none(end, page_size);
52
}
53
}
47
}
48
- return false;
49
+
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_mul(OptContext *ctx, TCGOp *op)
54
--
54
--
55
2.25.1
55
2.43.0
56
57
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 6 +++---
5
1 file changed, 3 insertions(+), 3 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
21
fold_xi_to_i(ctx, op, 0)) {
22
return true;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
26
}
27
28
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
29
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
30
tcg_opt_gen_movi(ctx, op2, rh, h);
31
return true;
32
}
33
- return false;
34
+ return finish_folding(ctx, op);
35
}
36
37
static bool fold_nand(OptContext *ctx, TCGOp *op)
38
--
39
2.43.0
diff view generated by jsdifflib
New patch
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
 
 static bool fold_nand(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask;
+
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return false;
+    s_mask = arg_info(op->args[1])->s_mask
+           & arg_info(op->args[2])->s_mask;
+    return fold_masks_s(ctx, op, s_mask);
 }
 
 static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
--
2.43.0
diff view generated by jsdifflib
New patch
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
 {
     /* Set to 1 all bits to the left of the rightmost. */
     uint64_t z_mask = arg_info(op->args[1])->z_mask;
-    ctx->z_mask = -(z_mask & -z_mask);
+    z_mask = -(z_mask & -z_mask);
 
-    /*
-     * Because of fold_sub_to_neg, we want to always return true,
-     * via finish_folding.
-     */
-    finish_folding(ctx, op);
-    return true;
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_neg(OptContext *ctx, TCGOp *op)
--
2.43.0
diff view generated by jsdifflib
New patch
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
 
 static bool fold_nor(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask;
+
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_not(ctx, op, 0)) {
         return true;
     }
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return false;
+    s_mask = arg_info(op->args[1])->s_mask
+           & arg_info(op->args[2])->s_mask;
+    return fold_masks_s(ctx, op, s_mask);
 }
 
 static bool fold_not(OptContext *ctx, TCGOp *op)
--
2.43.0
diff view generated by jsdifflib
New patch
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
     if (fold_const1(ctx, op)) {
         return true;
     }
-
-    ctx->s_mask = arg_info(op->args[1])->s_mask;
-
-    /* Because of fold_to_not, we want to always return true, via finish. */
-    finish_folding(ctx, op);
-    return true;
+    return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
 }
 
 static bool fold_or(OptContext *ctx, TCGOp *op)
--
2.43.0
diff view generated by jsdifflib
New patch
Avoid the use of the OptContext slots. Find TempOptInfo once.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
 
 static bool fold_or(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask, s_mask;
+    TempOptInfo *t1, *t2;
+
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_x(ctx, op, 0) ||
         fold_xx_to_x(ctx, op)) {
         return true;
     }
 
-    ctx->z_mask = arg_info(op->args[1])->z_mask
-                | arg_info(op->args[2])->z_mask;
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return fold_masks(ctx, op);
+    t1 = arg_info(op->args[1]);
+    t2 = arg_info(op->args[2]);
+    z_mask = t1->z_mask | t2->z_mask;
+    s_mask = t1->s_mask & t2->s_mask;
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_orc(OptContext *ctx, TCGOp *op)
--
2.43.0
diff view generated by jsdifflib
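Not part of the series: a standalone exhaustive check in plain C, over a small domain, of the mask rule used above for OR: a result bit can be nonzero only if it can be nonzero in at least one input, so the output z_mask is the union of the input z_masks (the sign masks intersect for the analogous reason).

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* Known-zero masks: a bit may be set in a value only if set in its mask. */
    uint64_t z1 = 0x0ff0, z2 = 0x00ff;
    uint64_t z_or = z1 | z2;

    /* Exhaustively check all values permitted by the masks. */
    for (uint64_t a = 0; a <= z1; a++) {
        if (a & ~z1) {
            continue;
        }
        for (uint64_t b = 0; b <= z2; b++) {
            if (b & ~z2) {
                continue;
            }
            assert(((a | b) & ~z_or) == 0);     /* no bit escapes the union */
        }
    }
    return 0;
}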
New patch
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
 
 static bool fold_orc(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask;
+
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, -1) ||
         fold_xi_to_x(ctx, op, -1) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
         return true;
     }
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return false;
+    s_mask = arg_info(op->args[1])->s_mask
+           & arg_info(op->args[2])->s_mask;
+    return fold_masks_s(ctx, op, s_mask);
 }
 
 static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
--
2.43.0
diff view generated by jsdifflib
1
For --enable-tcg-interpreter on Windows, we will need this.
1
Avoid the use of the OptContext slots.
2
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Be careful not to call fold_masks_zs when the memory operation
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
is wide enough to require multiple outputs, so split into two
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.
6
7
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
9
---
8
include/qemu/osdep.h | 1 +
10
tcg/optimize.c | 26 +++++++++++++++++++++-----
9
util/osdep.c | 9 +++++++++
11
1 file changed, 21 insertions(+), 5 deletions(-)
10
2 files changed, 10 insertions(+)
11
12
12
diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
14
--- a/include/qemu/osdep.h
15
--- a/tcg/optimize.c
15
+++ b/include/qemu/osdep.h
16
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ void sigaction_invoke(struct sigaction *action,
17
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
17
#endif
18
return fold_masks_s(ctx, op, s_mask);
18
19
int qemu_madvise(void *addr, size_t len, int advice);
20
+int qemu_mprotect_rw(void *addr, size_t size);
21
int qemu_mprotect_rwx(void *addr, size_t size);
22
int qemu_mprotect_none(void *addr, size_t size);
23
24
diff --git a/util/osdep.c b/util/osdep.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/util/osdep.c
27
+++ b/util/osdep.c
28
@@ -XXX,XX +XXX,XX @@ static int qemu_mprotect__osdep(void *addr, size_t size, int prot)
29
#endif
30
}
19
}
31
20
32
+int qemu_mprotect_rw(void *addr, size_t size)
21
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
33
+{
22
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
34
+#ifdef _WIN32
23
{
35
+ return qemu_mprotect__osdep(addr, size, PAGE_READWRITE);
24
const TCGOpDef *def = &tcg_op_defs[op->opc];
36
+#else
25
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
37
+ return qemu_mprotect__osdep(addr, size, PROT_READ | PROT_WRITE);
26
MemOp mop = get_memop(oi);
38
+#endif
27
int width = 8 * memop_size(mop);
28
+ uint64_t z_mask = -1, s_mask = 0;
29
30
if (width < 64) {
31
if (mop & MO_SIGN) {
32
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
33
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
34
} else {
35
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
36
+ z_mask = MAKE_64BIT_MASK(0, width);
37
}
38
}
39
40
/* Opcodes that touch guest memory stop the mb optimization. */
41
ctx->prev_mb = NULL;
42
- return false;
43
+
44
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
39
+}
45
+}
40
+
46
+
41
int qemu_mprotect_rwx(void *addr, size_t size)
47
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
42
{
48
+{
43
#ifdef _WIN32
49
+ /* Opcodes that touch guest memory stop the mb optimization. */
50
+ ctx->prev_mb = NULL;
51
+ return finish_folding(ctx, op);
52
}
53
54
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
55
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
56
break;
57
case INDEX_op_qemu_ld_a32_i32:
58
case INDEX_op_qemu_ld_a64_i32:
59
+ done = fold_qemu_ld_1reg(&ctx, op);
60
+ break;
61
case INDEX_op_qemu_ld_a32_i64:
62
case INDEX_op_qemu_ld_a64_i64:
63
+ if (TCG_TARGET_REG_BITS == 64) {
64
+ done = fold_qemu_ld_1reg(&ctx, op);
65
+ break;
66
+ }
67
+ QEMU_FALLTHROUGH;
68
case INDEX_op_qemu_ld_a32_i128:
69
case INDEX_op_qemu_ld_a64_i128:
70
- done = fold_qemu_ld(&ctx, op);
71
+ done = fold_qemu_ld_2reg(&ctx, op);
72
break;
73
case INDEX_op_qemu_st8_a32_i32:
74
case INDEX_op_qemu_st8_a64_i32:
44
--
75
--
45
2.25.1
76
2.43.0
46
47
diff view generated by jsdifflib
New patch
1
Stores have no output operands, and so need no further work.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 11 +++++------
7
1 file changed, 5 insertions(+), 6 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
14
{
15
/* Opcodes that touch guest memory stop the mb optimization. */
16
ctx->prev_mb = NULL;
17
- return false;
18
+ return true;
19
}
20
21
static bool fold_remainder(OptContext *ctx, TCGOp *op)
22
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
23
24
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
25
remove_mem_copy_all(ctx);
26
- return false;
27
+ return true;
28
}
29
30
switch (op->opc) {
31
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
32
g_assert_not_reached();
33
}
34
remove_mem_copy_in(ctx, ofs, ofs + lm1);
35
- return false;
36
+ return true;
37
}
38
39
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
40
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
41
TCGType type;
42
43
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
44
- fold_tcg_st(ctx, op);
45
- return false;
46
+ return fold_tcg_st(ctx, op);
47
}
48
49
src = arg_temp(op->args[0]);
50
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
51
last = ofs + tcg_type_size(type) - 1;
52
remove_mem_copy_in(ctx, ofs, last);
53
record_mem_copy(ctx, type, src, ofs, last);
54
- return false;
55
+ return true;
56
}
57
58
static bool fold_xor(OptContext *ctx, TCGOp *op)
59
--
60
2.43.0
diff view generated by jsdifflib
New patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
         fold_xx_to_i(ctx, op, 0)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
--
2.43.0
diff view generated by jsdifflib
New patch
1
Change return from bool to int; distinguish between
2
complete folding, simplification, and no change.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 22 ++++++++++++++--------
8
1 file changed, 14 insertions(+), 8 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
15
return finish_folding(ctx, op);
16
}
17
18
-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
19
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
20
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
21
{
22
uint64_t a_zmask, b_val;
23
TCGCond cond;
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
25
op->opc = xor_opc;
26
op->args[2] = arg_new_constant(ctx, 1);
27
}
28
- return false;
29
+ return -1;
30
}
31
}
32
-
33
- return false;
34
+ return 0;
35
}
36
37
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
39
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
40
}
41
42
- if (fold_setcond_zmask(ctx, op, false)) {
43
+ i = fold_setcond_zmask(ctx, op, false);
44
+ if (i > 0) {
45
return true;
46
}
47
- fold_setcond_tst_pow2(ctx, op, false);
48
+ if (i == 0) {
49
+ fold_setcond_tst_pow2(ctx, op, false);
50
+ }
51
52
ctx->z_mask = 1;
53
return false;
54
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
55
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
56
}
57
58
- if (fold_setcond_zmask(ctx, op, true)) {
59
+ i = fold_setcond_zmask(ctx, op, true);
60
+ if (i > 0) {
61
return true;
62
}
63
- fold_setcond_tst_pow2(ctx, op, true);
64
+ if (i == 0) {
65
+ fold_setcond_tst_pow2(ctx, op, true);
66
+ }
67
68
/* Value is {0,-1} so all bits are repetitions of the sign. */
69
ctx->s_mask = -1;
70
--
71
2.43.0
diff view generated by jsdifflib
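Not from the patch: a toy illustration in plain C of the tri-state convention introduced above, 1 for fully folded, -1 for simplified in place, 0 for unchanged, and how a caller dispatches on it. The names here are hypothetical, not the QEMU ones.

#include <stdio.h>

/* Hypothetical simplifier: 1 = folded away, -1 = rewritten in place, 0 = unchanged. */
static int try_simplify(int *op, int arg)
{
    if (arg == 0) {
        *op = 0;                /* fold to a constant */
        return 1;
    }
    if (arg == 1) {
        *op = -*op;             /* cheaper equivalent form */
        return -1;
    }
    return 0;
}

int main(void)
{
    int op = 42;
    int i = try_simplify(&op, 1);
    if (i > 0) {
        puts("fully folded: stop here");
    } else if (i == 0) {
        puts("unchanged: try the next transformation");
    }
    /* i < 0: already simplified, skip further rewrites but keep the op. */
    return 0;
}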
New patch
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
         fold_setcond_tst_pow2(ctx, op, false);
     }
 
-    ctx->z_mask = 1;
-    return false;
+    return fold_masks_z(ctx, op, 1);
 }
 
 static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
--
2.43.0
diff view generated by jsdifflib
New patch
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
     }
 
     /* Value is {0,-1} so all bits are repetitions of the sign. */
-    ctx->s_mask = -1;
-    return false;
+    return fold_masks_s(ctx, op, -1);
 }
 
 static bool fold_setcond2(OptContext *ctx, TCGOp *op)
--
2.43.0
diff view generated by jsdifflib
New patch
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
         return fold_setcond(ctx, op);
     }
 
-    ctx->z_mask = 1;
-    return false;
+    return fold_masks_z(ctx, op, 1);
 
 do_setcond_const:
     return tcg_opt_gen_movi(ctx, op, op->args[0], i);
--
2.43.0
diff view generated by jsdifflib
New patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
     if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
         op->args[3] = tcg_swap_cond(op->args[3]);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
--
2.43.0
diff view generated by jsdifflib
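Not from the patch: a tiny standalone check in plain C of why swapping a comparison's operands must also swap the condition, which is what tcg_swap_cond provides for the vector compare above. The enum and helpers here are hypothetical stand-ins.

#include <assert.h>
#include <stdbool.h>

typedef enum { COND_LT, COND_GT, COND_LE, COND_GE } Cond;

/* Swapping operands maps a condition to its mirror (LT <-> GT, LE <-> GE). */
static Cond swap_cond(Cond c)
{
    switch (c) {
    case COND_LT: return COND_GT;
    case COND_GT: return COND_LT;
    case COND_LE: return COND_GE;
    default:      return COND_LE;
    }
}

static bool eval(Cond c, int a, int b)
{
    switch (c) {
    case COND_LT: return a < b;
    case COND_GT: return a > b;
    case COND_LE: return a <= b;
    default:      return a >= b;
    }
}

int main(void)
{
    for (int a = -2; a <= 2; a++) {
        for (int b = -2; b <= 2; b++) {
            for (int c = COND_LT; c <= COND_GE; c++) {
                assert(eval((Cond)c, a, b) == eval(swap_cond((Cond)c), b, a));
            }
        }
    }
    return 0;
}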
New patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
     if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
         op->args[5] = tcg_invert_cond(op->args[5]);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_sextract(OptContext *ctx, TCGOp *op)
--
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 24 +++++++++---------------
7
1 file changed, 9 insertions(+), 15 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
14
static bool fold_sextract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask, s_mask, s_mask_old;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = sextract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ sextract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask = arg_info(op->args[1])->z_mask;
33
- z_mask = sextract64(z_mask, pos, len);
34
- ctx->z_mask = z_mask;
35
-
36
- s_mask_old = arg_info(op->args[1])->s_mask;
37
- s_mask = sextract64(s_mask_old, pos, len);
38
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
39
- ctx->s_mask = s_mask;
40
+ s_mask_old = t1->s_mask;
41
+ s_mask = s_mask_old >> pos;
42
+ s_mask |= -1ull << (len - 1);
43
44
if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
45
return true;
46
}
47
48
- return fold_masks(ctx, op);
49
+ z_mask = sextract64(t1->z_mask, pos, len);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_shift(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
diff view generated by jsdifflib
1
Do not handle protections on a case-by-case basis in the
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
various alloc_code_gen_buffer instances; do it within a
3
single loop in tcg_region_init.
4
2
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
5
---
9
tcg/region.c | 45 +++++++++++++++++++++++++++++++--------------
6
tcg/optimize.c | 27 ++++++++++++++-------------
10
1 file changed, 31 insertions(+), 14 deletions(-)
7
1 file changed, 14 insertions(+), 13 deletions(-)
11
8
12
diff --git a/tcg/region.c b/tcg/region.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/region.c
11
--- a/tcg/optimize.c
15
+++ b/tcg/region.c
12
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
14
static bool fold_shift(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t s_mask, z_mask, sign;
17
+ TempOptInfo *t1, *t2;
18
19
if (fold_const2(ctx, op) ||
20
fold_ix_to_i(ctx, op, 0) ||
21
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
22
return true;
17
}
23
}
18
#endif
24
19
25
- s_mask = arg_info(op->args[1])->s_mask;
20
- if (qemu_mprotect_rwx(buf, size)) {
26
- z_mask = arg_info(op->args[1])->z_mask;
21
- error_setg_errno(errp, errno, "mprotect of jit buffer");
27
+ t1 = arg_info(op->args[1]);
22
- return false;
28
+ t2 = arg_info(op->args[2]);
23
- }
29
+ s_mask = t1->s_mask;
30
+ z_mask = t1->z_mask;
31
32
- if (arg_is_const(op->args[2])) {
33
- int sh = arg_info(op->args[2])->val;
24
-
34
-
25
region.start_aligned = buf;
35
- ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
26
region.total_size = size;
36
+ if (ti_is_const(t2)) {
27
37
+ int sh = ti_const_val(t2);
28
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
38
29
{
39
+ z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
30
const size_t page_size = qemu_real_host_page_size;
40
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
31
size_t region_size;
41
32
- size_t i;
42
- return fold_masks(ctx, op);
33
- int have_prot;
43
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
34
+ int have_prot, need_prot;
35
36
/* Size the buffer. */
37
if (tb_size == 0) {
38
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
39
* Set guard pages in the rw buffer, as that's the one into which
40
* buffer overruns could occur. Do not set guard pages in the rx
41
* buffer -- let that one use hugepages throughout.
42
+ * Work with the page protections set up with the initial mapping.
43
*/
44
- for (i = 0; i < region.n; i++) {
45
+ need_prot = PAGE_READ | PAGE_WRITE;
46
+#ifndef CONFIG_TCG_INTERPRETER
47
+ if (tcg_splitwx_diff == 0) {
48
+ need_prot |= PAGE_EXEC;
49
+ }
50
+#endif
51
+ for (size_t i = 0, n = region.n; i < n; i++) {
52
void *start, *end;
53
54
tcg_region_bounds(i, &start, &end);
55
+ if (have_prot != need_prot) {
56
+ int rc;
57
58
- /*
59
- * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
60
- * rejects a permission change from RWX -> NONE. Guard pages are
61
- * nice for bug detection but are not essential; ignore any failure.
62
- */
63
- (void)qemu_mprotect_none(end, page_size);
64
+ if (need_prot == (PAGE_READ | PAGE_WRITE | PAGE_EXEC)) {
65
+ rc = qemu_mprotect_rwx(start, end - start);
66
+ } else if (need_prot == (PAGE_READ | PAGE_WRITE)) {
67
+ rc = qemu_mprotect_rw(start, end - start);
68
+ } else {
69
+ g_assert_not_reached();
70
+ }
71
+ if (rc) {
72
+ error_setg_errno(&error_fatal, errno,
73
+ "mprotect of jit buffer");
74
+ }
75
+ }
76
+ if (have_prot != 0) {
77
+ /*
78
+ * macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
79
+ * rejects a permission change from RWX -> NONE. Guard pages are
80
+ * nice for bug detection but are not essential; ignore any failure.
81
+ */
82
+ (void)qemu_mprotect_none(end, page_size);
83
+ }
84
}
44
}
85
45
86
tcg_region_trees_init();
46
switch (op->opc) {
47
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
48
* Arithmetic right shift will not reduce the number of
49
* input sign repetitions.
50
*/
51
- ctx->s_mask = s_mask;
52
- break;
53
+ return fold_masks_s(ctx, op, s_mask);
54
CASE_OP_32_64(shr):
55
/*
56
* If the sign bit is known zero, then logical right shift
57
- * will not reduced the number of input sign repetitions.
58
+ * will not reduce the number of input sign repetitions.
59
*/
60
- sign = (s_mask & -s_mask) >> 1;
61
+ sign = -s_mask;
62
if (sign && !(z_mask & sign)) {
63
- ctx->s_mask = s_mask;
64
+ return fold_masks_s(ctx, op, s_mask);
65
}
66
break;
67
default:
68
break;
69
}
70
71
- return false;
72
+ return finish_folding(ctx, op);
73
}
74
75
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
87
--
76
--
88
2.25.1
77
2.43.0
89
90
diff view generated by jsdifflib
New patch
1
Merge the two conditions, sign != 0 && !(z_mask & sign),
2
by testing ~z_mask & sign. If sign == 0, the logical and
3
will produce false.
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 5 ++---
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
16
17
static bool fold_shift(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t s_mask, z_mask, sign;
20
+ uint64_t s_mask, z_mask;
21
TempOptInfo *t1, *t2;
22
23
if (fold_const2(ctx, op) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
25
* If the sign bit is known zero, then logical right shift
26
* will not reduce the number of input sign repetitions.
27
*/
28
- sign = -s_mask;
29
- if (sign && !(z_mask & sign)) {
30
+ if (~z_mask & -s_mask) {
31
return fold_masks_s(ctx, op, s_mask);
32
}
33
break;
34
--
35
2.43.0
diff view generated by jsdifflib
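Not from the patch: a standalone check in plain C of the equivalence claimed above. With the left-aligned s_mask representation, -s_mask is either 0 or the single lowest sign-repetition bit, so testing ~z_mask & -s_mask gives the same answer as sign != 0 && !(z_mask & sign).

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* s_mask follows the newer convention: all bits known to equal the
     * sign bit, as a left-aligned mask. */
    for (int k = 0; k < 64; k++) {
        uint64_t s_mask = ~(uint64_t)0 << k;
        uint64_t sign = -s_mask;              /* the single bit 1 << k */
        uint64_t samples[4] = { 0, sign, ~sign, ~(uint64_t)0 };
        for (int i = 0; i < 4; i++) {
            uint64_t z_mask = samples[i];
            int old_test = sign != 0 && !(z_mask & sign);
            int new_test = (~z_mask & -s_mask) != 0;
            assert(old_test == new_test);
        }
    }
    /* With no known sign repetitions at all, both forms reject. */
    uint64_t s_mask = 0, z_mask = 0x1234;
    assert(((~z_mask & -s_mask) != 0) == 0);
    return 0;
}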
1
This has only one user, but will make more sense after some
1
Duplicate fold_sub_vec into fold_sub instead of calling it,
2
code motion.
2
now that fold_sub_vec always returns true.
3
3
4
Always leave the tcg_init_ctx initialized to the first region,
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
in preparation for tcg_prologue_init(). This also requires
6
that we don't re-allocate the region for the first cpu, lest
7
we hit the assertion for total number of regions allocated .
8
9
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
6
---
13
tcg/tcg.c | 37 ++++++++++++++++++++++---------------
7
tcg/optimize.c | 9 ++++++---
14
1 file changed, 22 insertions(+), 15 deletions(-)
8
1 file changed, 6 insertions(+), 3 deletions(-)
15
9
16
diff --git a/tcg/tcg.c b/tcg/tcg.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/tcg.c
12
--- a/tcg/optimize.c
19
+++ b/tcg/tcg.c
13
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(void)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
21
15
fold_sub_to_neg(ctx, op)) {
22
tcg_region_trees_init();
16
return true;
23
17
}
24
- /* In user-mode we support only one ctx, so do the initial allocation now */
18
- return false;
25
-#ifdef CONFIG_USER_ONLY
19
+ return finish_folding(ctx, op);
26
- tcg_region_initial_alloc__locked(tcg_ctx);
27
-#endif
28
+ /*
29
+ * Leave the initial context initialized to the first region.
30
+ * This will be the context into which we generate the prologue.
31
+ * It is also the only context for CONFIG_USER_ONLY.
32
+ */
33
+ tcg_region_initial_alloc__locked(&tcg_init_ctx);
34
+}
35
+
36
+static void tcg_region_prologue_set(TCGContext *s)
37
+{
38
+ /* Deduct the prologue from the first region. */
39
+ g_assert(region.start == s->code_gen_buffer);
40
+ region.start = s->code_ptr;
41
+
42
+ /* Recompute boundaries of the first region. */
43
+ tcg_region_assign(s, 0);
44
+
45
+ /* Register the balance of the buffer with gdb. */
46
+ tcg_register_jit(tcg_splitwx_to_rx(region.start),
47
+ region.end - region.start);
48
}
20
}
49
21
50
#ifdef CONFIG_DEBUG_TCG
22
static bool fold_sub(OptContext *ctx, TCGOp *op)
51
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void)
23
{
52
24
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
53
if (n > 0) {
25
+ if (fold_const2(ctx, op) ||
54
alloc_tcg_plugin_context(s);
26
+ fold_xx_to_i(ctx, op, 0) ||
55
+ tcg_region_initial_alloc(s);
27
+ fold_xi_to_x(ctx, op, 0) ||
28
+ fold_sub_to_neg(ctx, op)) {
29
return true;
56
}
30
}
57
31
58
tcg_ctx = s;
32
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
59
- tcg_region_initial_alloc(s);
33
? INDEX_op_add_i32 : INDEX_op_add_i64);
34
op->args[2] = arg_new_constant(ctx, -val);
35
}
36
- return false;
37
+ return finish_folding(ctx, op);
60
}
38
}
61
#endif /* !CONFIG_USER_ONLY */
39
62
40
static bool fold_sub2(OptContext *ctx, TCGOp *op)
63
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
64
{
65
size_t prologue_size;
66
67
- /* Put the prologue at the beginning of code_gen_buffer. */
68
- tcg_region_assign(s, 0);
69
s->code_ptr = s->code_gen_ptr;
70
s->code_buf = s->code_gen_ptr;
71
s->data_gen_ptr = NULL;
72
@@ -XXX,XX +XXX,XX @@ void tcg_prologue_init(TCGContext *s)
73
(uintptr_t)s->code_buf, prologue_size);
74
#endif
75
76
- /* Deduct the prologue from the first region. */
77
- region.start = s->code_ptr;
78
-
79
- /* Recompute boundaries of the first region. */
80
- tcg_region_assign(s, 0);
81
-
82
- tcg_register_jit(tcg_splitwx_to_rx(region.start),
83
- region.end - region.start);
84
+ tcg_region_prologue_set(s);
85
86
#ifdef DEBUG_DISAS
87
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
88
--
41
--
89
2.25.1
42
2.43.0
90
91
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 16 +++++++++-------
7
1 file changed, 9 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask = -1, s_mask = 0;
18
+
19
/* We can't do any folding with a load, but we can record bits. */
20
switch (op->opc) {
21
CASE_OP_32_64(ld8s):
22
- ctx->s_mask = MAKE_64BIT_MASK(8, 56);
23
+ s_mask = INT8_MIN;
24
break;
25
CASE_OP_32_64(ld8u):
26
- ctx->z_mask = MAKE_64BIT_MASK(0, 8);
27
+ z_mask = MAKE_64BIT_MASK(0, 8);
28
break;
29
CASE_OP_32_64(ld16s):
30
- ctx->s_mask = MAKE_64BIT_MASK(16, 48);
31
+ s_mask = INT16_MIN;
32
break;
33
CASE_OP_32_64(ld16u):
34
- ctx->z_mask = MAKE_64BIT_MASK(0, 16);
35
+ z_mask = MAKE_64BIT_MASK(0, 16);
36
break;
37
case INDEX_op_ld32s_i64:
38
- ctx->s_mask = MAKE_64BIT_MASK(32, 32);
39
+ s_mask = INT32_MIN;
40
break;
41
case INDEX_op_ld32u_i64:
42
- ctx->z_mask = MAKE_64BIT_MASK(0, 32);
43
+ z_mask = MAKE_64BIT_MASK(0, 32);
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
51
52
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
diff view generated by jsdifflib
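Not from the patch: a standalone note in plain C on the constants used above. Under the newer s_mask convention (all bits equal to the sign bit, including the sign bit itself), INT8_MIN viewed as a 64-bit mask covers bits 7..63, exactly one bit more than the old MAKE_64BIT_MASK(8, 56). The macro below mirrors the QEMU definition only for this check.

#include <assert.h>
#include <stdint.h>

#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

int main(void)
{
    uint64_t old_s_mask = MAKE_64BIT_MASK(8, 56);       /* bits 8..63 */
    uint64_t new_s_mask = (uint64_t)(int64_t)INT8_MIN;  /* bits 7..63 */

    assert(new_s_mask == (old_s_mask | ((uint64_t)1 << 7)));

    /* A sign-extended int8_t really has all of bits 7..63 equal to bit 7. */
    int64_t v = (int8_t)0x80;
    assert(((uint64_t)v & new_s_mask) == new_s_mask);
    return 0;
}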
1
Finish the divorce of tcg/ from hw/, and do not take
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
the max cpu value from MachineState; just remember what
3
we were passed in tcg_init.
4
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
3
---
10
tcg/tcg-internal.h | 3 ++-
4
tcg/optimize.c | 2 +-
11
tcg/region.c | 6 +++---
5
1 file changed, 1 insertion(+), 1 deletion(-)
12
tcg/tcg.c | 23 ++++++++++-------------
13
3 files changed, 15 insertions(+), 17 deletions(-)
14
6
15
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
16
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
17
--- a/tcg/tcg-internal.h
9
--- a/tcg/optimize.c
18
+++ b/tcg/tcg-internal.h
10
+++ b/tcg/optimize.c
19
@@ -XXX,XX +XXX,XX @@
11
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
20
#define TCG_HIGHWATER 1024
12
TCGType type;
21
13
22
extern TCGContext **tcg_ctxs;
14
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
23
-extern unsigned int n_tcg_ctxs;
15
- return false;
24
+extern unsigned int tcg_cur_ctxs;
16
+ return finish_folding(ctx, op);
25
+extern unsigned int tcg_max_ctxs;
26
27
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus);
28
bool tcg_region_alloc(TCGContext *s);
29
diff --git a/tcg/region.c b/tcg/region.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/tcg/region.c
32
+++ b/tcg/region.c
33
@@ -XXX,XX +XXX,XX @@ void tcg_region_initial_alloc(TCGContext *s)
34
/* Call from a safe-work context */
35
void tcg_region_reset_all(void)
36
{
37
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
38
+ unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
39
unsigned int i;
40
41
qemu_mutex_lock(&region.lock);
42
@@ -XXX,XX +XXX,XX @@ void tcg_region_prologue_set(TCGContext *s)
43
*/
44
size_t tcg_code_size(void)
45
{
46
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
47
+ unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
48
unsigned int i;
49
size_t total;
50
51
@@ -XXX,XX +XXX,XX @@ size_t tcg_code_capacity(void)
52
53
size_t tcg_tb_phys_invalidate_count(void)
54
{
55
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
56
+ unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
57
unsigned int i;
58
size_t total = 0;
59
60
diff --git a/tcg/tcg.c b/tcg/tcg.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/tcg/tcg.c
63
+++ b/tcg/tcg.c
64
@@ -XXX,XX +XXX,XX @@
65
#define NO_CPU_IO_DEFS
66
67
#include "exec/exec-all.h"
68
-
69
-#if !defined(CONFIG_USER_ONLY)
70
-#include "hw/boards.h"
71
-#endif
72
-
73
#include "tcg/tcg-op.h"
74
75
#if UINTPTR_MAX == UINT32_MAX
76
@@ -XXX,XX +XXX,XX @@ static int tcg_out_ldst_finalize(TCGContext *s);
77
#endif
78
79
TCGContext **tcg_ctxs;
80
-unsigned int n_tcg_ctxs;
81
+unsigned int tcg_cur_ctxs;
82
+unsigned int tcg_max_ctxs;
83
TCGv_env cpu_env = 0;
84
const void *tcg_code_gen_epilogue;
85
uintptr_t tcg_splitwx_diff;
86
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void)
87
#else
88
void tcg_register_thread(void)
89
{
90
- MachineState *ms = MACHINE(qdev_get_machine());
91
TCGContext *s = g_malloc(sizeof(*s));
92
unsigned int i, n;
93
94
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void)
95
}
17
}
96
18
97
/* Claim an entry in tcg_ctxs */
19
type = ctx->type;
98
- n = qatomic_fetch_inc(&n_tcg_ctxs);
99
- g_assert(n < ms->smp.max_cpus);
100
+ n = qatomic_fetch_inc(&tcg_cur_ctxs);
101
+ g_assert(n < tcg_max_ctxs);
102
qatomic_set(&tcg_ctxs[n], s);
103
104
if (n > 0) {
105
@@ -XXX,XX +XXX,XX @@ static void tcg_context_init(unsigned max_cpus)
106
*/
107
#ifdef CONFIG_USER_ONLY
108
tcg_ctxs = &tcg_ctx;
109
- n_tcg_ctxs = 1;
110
+ tcg_cur_ctxs = 1;
111
+ tcg_max_ctxs = 1;
112
#else
113
- tcg_ctxs = g_new(TCGContext *, max_cpus);
114
+ tcg_max_ctxs = max_cpus;
115
+ tcg_ctxs = g_new0(TCGContext *, max_cpus);
116
#endif
117
118
tcg_debug_assert(!tcg_regset_test_reg(s->reserved_regs, TCG_AREG0));
119
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
120
static inline
121
void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
122
{
123
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
124
+ unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
125
unsigned int i;
126
127
for (i = 0; i < n_ctxs; i++) {
128
@@ -XXX,XX +XXX,XX @@ void tcg_dump_op_count(void)
129
130
int64_t tcg_cpu_exec_time(void)
131
{
132
- unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
133
+ unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
134
unsigned int i;
135
int64_t ret = 0;
136
137
--
20
--
138
2.25.1
21
2.43.0
139
140
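
The slot-claiming pattern used by tcg_register_thread() is compact enough to show in isolation. A minimal sketch of the idea, using C11 atomics instead of QEMU's qatomic wrappers (names here are illustrative, not QEMU API):

    #include <assert.h>
    #include <stdatomic.h>

    #define MAX_CTXS 64                 /* stand-in for tcg_max_ctxs */

    static _Atomic unsigned cur_ctxs;   /* stand-in for tcg_cur_ctxs */
    static void *ctxs[MAX_CTXS];

    /* Claim the next slot without a lock: fetch-and-increment hands
       each thread a unique index, and the assert checks it against
       the capacity fixed at startup. */
    static void register_thread(void *ctx)
    {
        unsigned n = atomic_fetch_add(&cur_ctxs, 1);

        assert(n < MAX_CTXS);
        ctxs[n] = ctx;
    }
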
1
Compute the value using straight division and bounds,
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
rather than a loop. Pass in tb_size rather than reading
2
Remove fold_masks as the function becomes unused.
3
from tcg_init_ctx.code_gen_buffer_size,
4
3
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
6
---
9
tcg/region.c | 29 ++++++++++++-----------------
7
tcg/optimize.c | 18 ++++++++----------
10
1 file changed, 12 insertions(+), 17 deletions(-)
8
1 file changed, 8 insertions(+), 10 deletions(-)
11
9
12
diff --git a/tcg/region.c b/tcg/region.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/region.c
12
--- a/tcg/optimize.c
15
+++ b/tcg/region.c
13
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ void tcg_region_reset_all(void)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
17
tcg_region_tree_reset_all();
15
return fold_masks_zs(ctx, op, -1, s_mask);
18
}
16
}
19
17
20
-static size_t tcg_n_regions(unsigned max_cpus)
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
21
+static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
19
-{
20
- return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
21
-}
22
-
23
/*
24
* An "affected" mask bit is 0 if and only if the result is identical
25
* to the first input. Thus if the entire mask is 0, the operation
26
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
27
28
static bool fold_xor(OptContext *ctx, TCGOp *op)
22
{
29
{
23
#ifdef CONFIG_USER_ONLY
30
+ uint64_t z_mask, s_mask;
24
return 1;
31
+ TempOptInfo *t1, *t2;
25
#else
26
+ size_t n_regions;
27
+
32
+
28
/*
33
if (fold_const2_commutative(ctx, op) ||
29
* It is likely that some vCPUs will translate more code than others,
34
fold_xx_to_i(ctx, op, 0) ||
30
* so we first try to set more regions than max_cpus, with those regions
35
fold_xi_to_x(ctx, op, 0) ||
31
* being of reasonable size. If that's not possible we make do by evenly
36
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
32
* dividing the code_gen_buffer among the vCPUs.
37
return true;
33
*/
34
- size_t i;
35
-
36
/* Use a single region if all we have is one vCPU thread */
37
if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
38
return 1;
39
}
38
}
40
39
41
- /* Try to have more regions than max_cpus, with each region being >= 2 MB */
40
- ctx->z_mask = arg_info(op->args[1])->z_mask
42
- for (i = 8; i > 0; i--) {
41
- | arg_info(op->args[2])->z_mask;
43
- size_t regions_per_thread = i;
42
- ctx->s_mask = arg_info(op->args[1])->s_mask
44
- size_t region_size;
43
- & arg_info(op->args[2])->s_mask;
45
-
44
- return fold_masks(ctx, op);
46
- region_size = tcg_init_ctx.code_gen_buffer_size;
45
+ t1 = arg_info(op->args[1]);
47
- region_size /= max_cpus * regions_per_thread;
46
+ t2 = arg_info(op->args[2]);
48
-
47
+ z_mask = t1->z_mask | t2->z_mask;
49
- if (region_size >= 2 * 1024u * 1024) {
48
+ s_mask = t1->s_mask & t2->s_mask;
50
- return max_cpus * regions_per_thread;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
- }
52
+ /*
53
+ * Try to have more regions than max_cpus, with each region being >= 2 MB.
54
+ * If we can't, then just allocate one region per vCPU thread.
55
+ */
56
+ n_regions = tb_size / (2 * MiB);
57
+ if (n_regions <= max_cpus) {
58
+ return max_cpus;
59
}
60
- /* If we can't, then just allocate one region per vCPU thread */
61
- return max_cpus;
62
+ return MIN(n_regions, max_cpus * 8);
63
#endif
64
}
50
}
65
51
66
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
52
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
67
buf = tcg_init_ctx.code_gen_buffer;
68
total_size = tcg_init_ctx.code_gen_buffer_size;
69
page_size = qemu_real_host_page_size;
70
- n_regions = tcg_n_regions(max_cpus);
71
+ n_regions = tcg_n_regions(total_size, max_cpus);
72
73
/* The first region will be 'aligned - buf' bytes larger than the others */
74
aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
75
--
53
--
76
2.25.1
54
2.43.0
77
78
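
The arithmetic that replaces the old loop is easy to check in isolation. A standalone sketch of the sizing rule (illustrative, not the in-tree function): at least one region per vCPU, regions of at least 2 MiB, and at most eight regions per vCPU.

    #include <stddef.h>

    #define MiB       (1024 * 1024)
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static size_t n_regions_for(size_t tb_size, unsigned max_cpus)
    {
        /* How many 2 MiB regions fit in the buffer? */
        size_t n_regions = tb_size / (2 * MiB);

        if (n_regions <= max_cpus) {
            /* Too small for 2 MiB per vCPU: one region per vCPU. */
            return max_cpus;
        }
        /* Otherwise cap at eight regions per vCPU. */
        return MIN(n_regions, (size_t)max_cpus * 8);
    }
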
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
12
return fold_orc(ctx, op);
13
}
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
/* Propagate constants and copies, fold constant expressions. */
20
--
21
2.43.0
New patch
1
All non-default cases now finish folding within each function.
2
Do the same with the default case and assert it is done after.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 6 ++----
8
1 file changed, 2 insertions(+), 4 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
15
done = true;
16
break;
17
default:
18
+ done = finish_folding(&ctx, op);
19
break;
20
}
21
-
22
- if (!done) {
23
- finish_folding(&ctx, op);
24
- }
25
+ tcg_debug_assert(done);
26
}
27
}
28
--
29
2.43.0
New patch
1
All mask setting is now done with parameters via fold_masks_*.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 -------------
7
1 file changed, 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
14
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
15
16
/* In flight values from optimization. */
17
- uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
18
- uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
19
TCGType type;
20
} OptContext;
21
22
@@ -XXX,XX +XXX,XX @@ static bool finish_folding(OptContext *ctx, TCGOp *op)
23
for (i = 0; i < nb_oargs; i++) {
24
TCGTemp *ts = arg_temp(op->args[i]);
25
reset_ts(ctx, ts);
26
- /*
27
- * Save the corresponding known-zero/sign bits mask for the
28
- * first output argument (only one supported so far).
29
- */
30
- if (i == 0) {
31
- ts_info(ts)->z_mask = ctx->z_mask;
32
- }
33
}
34
return true;
35
}
36
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
37
ctx.type = TCG_TYPE_I32;
38
}
39
40
- /* Assume all bits affected, no bits known zero, no sign reps. */
41
- ctx.z_mask = -1;
42
- ctx.s_mask = 0;
43
-
44
/*
45
* Process each opcode.
46
* Sorted alphabetically by opcode as much as possible.
47
--
48
2.43.0
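
The z_mask/s_mask bookkeeping that these patches move from the OptContext slots into fold_masks_zs() parameters follows a simple per-operation rule. A self-contained sketch for XOR (struct and names are illustrative, not the optimizer's types):

    #include <stdint.h>

    typedef struct {
        uint64_t z_mask;   /* bit is 0 iff the value bit is known 0 */
        uint64_t s_mask;   /* bit is 1 if the value bit matches the msb */
    } KnownBits;

    /* A result bit of x ^ y may be 1 wherever either input may be 1,
       so the zero-masks union; a bit is a sign copy only where both
       inputs are sign copies, so the sign-masks intersect. */
    static KnownBits known_bits_xor(KnownBits a, KnownBits b)
    {
        KnownBits r;

        r.z_mask = a.z_mask | b.z_mask;
        r.s_mask = a.s_mask & b.s_mask;
        return r;
    }
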
1
Change the interface from a boolean error indication to a
1
All instances of s_mask have been converted to the new
2
negative error vs a non-negative protection. For the moment
2
representation. We can now re-enable usage.
3
this is only an interface change, not making use of the new data.
4
3
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
6
---
9
tcg/region.c | 63 +++++++++++++++++++++++++++-------------------------
7
tcg/optimize.c | 4 ++--
10
1 file changed, 33 insertions(+), 30 deletions(-)
8
1 file changed, 2 insertions(+), 2 deletions(-)
11
9
12
diff --git a/tcg/region.c b/tcg/region.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/region.c
12
--- a/tcg/optimize.c
15
+++ b/tcg/region.c
13
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static inline void split_cross_256mb(void **obuf, size_t *osize,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
17
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
15
g_assert_not_reached();
18
__attribute__((aligned(CODE_GEN_ALIGN)));
19
20
-static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
21
+static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
22
{
23
void *buf, *end;
24
size_t size;
25
26
if (splitwx > 0) {
27
error_setg(errp, "jit split-wx not supported");
28
- return false;
29
+ return -1;
30
}
16
}
31
17
32
/* page-align the beginning and end of the buffer */
18
- if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
33
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
19
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
34
20
return true;
35
region.start_aligned = buf;
36
region.total_size = size;
37
- return true;
38
+
39
+ return PROT_READ | PROT_WRITE;
40
}
41
#elif defined(_WIN32)
42
-static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
43
+static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
44
{
45
void *buf;
46
47
if (splitwx > 0) {
48
error_setg(errp, "jit split-wx not supported");
49
- return false;
50
+ return -1;
51
}
21
}
52
22
53
buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
23
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
54
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
24
s_mask = s_mask_old >> pos;
55
25
s_mask |= -1ull << (len - 1);
56
region.start_aligned = buf;
26
57
region.total_size = size;
27
- if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
58
- return true;
28
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
59
+
29
return true;
60
+ return PAGE_READ | PAGE_WRITE | PAGE_EXEC;
61
}
62
#else
63
-static bool alloc_code_gen_buffer_anon(size_t size, int prot,
64
- int flags, Error **errp)
65
+static int alloc_code_gen_buffer_anon(size_t size, int prot,
66
+ int flags, Error **errp)
67
{
68
void *buf;
69
70
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_anon(size_t size, int prot,
71
if (buf == MAP_FAILED) {
72
error_setg_errno(errp, errno,
73
"allocate %zu bytes for jit buffer", size);
74
- return false;
75
+ return -1;
76
}
30
}
77
31
78
#ifdef __mips__
79
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_anon(size_t size, int prot,
80
81
region.start_aligned = buf;
82
region.total_size = size;
83
- return true;
84
+ return prot;
85
}
86
87
#ifndef CONFIG_TCG_INTERPRETER
88
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
89
90
#ifdef __mips__
91
/* Find space for the RX mapping, vs the 256MiB regions. */
92
- if (!alloc_code_gen_buffer_anon(size, PROT_NONE,
93
- MAP_PRIVATE | MAP_ANONYMOUS |
94
- MAP_NORESERVE, errp)) {
95
+ if (alloc_code_gen_buffer_anon(size, PROT_NONE,
96
+ MAP_PRIVATE | MAP_ANONYMOUS |
97
+ MAP_NORESERVE, errp) < 0) {
98
return false;
99
}
100
/* The size of the mapping may have been adjusted. */
101
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
102
/* Request large pages for the buffer and the splitwx. */
103
qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
104
qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
105
- return true;
106
+ return PROT_READ | PROT_WRITE;
107
108
fail_rx:
109
error_setg_errno(errp, errno, "failed to map shared memory for execute");
110
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
111
if (fd >= 0) {
112
close(fd);
113
}
114
- return false;
115
+ return -1;
116
}
117
#endif /* CONFIG_POSIX */
118
119
@@ -XXX,XX +XXX,XX @@ extern kern_return_t mach_vm_remap(vm_map_t target_task,
120
vm_prot_t *max_protection,
121
vm_inherit_t inheritance);
122
123
-static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
124
+static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
125
{
126
kern_return_t ret;
127
mach_vm_address_t buf_rw, buf_rx;
128
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
129
/* Map the read-write portion via normal anon memory. */
130
if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
131
MAP_PRIVATE | MAP_ANONYMOUS, errp)) {
132
- return false;
133
+ return -1;
134
}
135
136
buf_rw = region.start_aligned;
137
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
138
/* TODO: Convert "ret" to a human readable error message. */
139
error_setg(errp, "vm_remap for jit splitwx failed");
140
munmap((void *)buf_rw, size);
141
- return false;
142
+ return -1;
143
}
144
145
if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
146
error_setg_errno(errp, errno, "mprotect for jit splitwx");
147
munmap((void *)buf_rx, size);
148
munmap((void *)buf_rw, size);
149
- return false;
150
+ return -1;
151
}
152
153
tcg_splitwx_diff = buf_rx - buf_rw;
154
- return true;
155
+ return PROT_READ | PROT_WRITE;
156
}
157
#endif /* CONFIG_DARWIN */
158
#endif /* CONFIG_TCG_INTERPRETER */
159
160
-static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
161
+static int alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
162
{
163
#ifndef CONFIG_TCG_INTERPRETER
164
# ifdef CONFIG_DARWIN
165
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
166
# endif
167
#endif
168
error_setg(errp, "jit split-wx not supported");
169
- return false;
170
+ return -1;
171
}
172
173
-static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
174
+static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
175
{
176
ERRP_GUARD();
177
int prot, flags;
178
179
if (splitwx) {
180
- if (alloc_code_gen_buffer_splitwx(size, errp)) {
181
- return true;
182
+ prot = alloc_code_gen_buffer_splitwx(size, errp);
183
+ if (prot >= 0) {
184
+ return prot;
185
}
186
/*
187
* If splitwx force-on (1), fail;
188
* if splitwx default-on (-1), fall through to splitwx off.
189
*/
190
if (splitwx > 0) {
191
- return false;
192
+ return -1;
193
}
194
error_free_or_abort(errp);
195
}
196
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
197
size_t page_size;
198
size_t region_size;
199
size_t i;
200
- bool ok;
201
+ int have_prot;
202
203
- ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
204
- splitwx, &error_fatal);
205
- assert(ok);
206
+ have_prot = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
207
+ splitwx, &error_fatal);
208
+ assert(have_prot >= 0);
209
210
/*
211
* Make region_size a multiple of page_size, using aligned as the start.
212
--
32
--
213
2.25.1
33
2.43.0
214
215
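
The new convention is the usual POSIX-style one: a negative value signals failure, while a non-negative value carries useful data, here the protection bits the buffer was left with. A simplified sketch of the caller-visible shape (not the actual QEMU allocator):

    #include <stddef.h>
    #include <sys/mman.h>

    /* Returns -1 on failure, else the protection the buffer currently
       has, so the caller knows whether it still needs to mprotect()
       the region before generating code into it. */
    static int alloc_buffer_sketch(void **buf, size_t size)
    {
        void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
            return -1;
        }
        *buf = p;
        return PROT_READ | PROT_WRITE;
    }
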
1
If qemu_get_host_physmem returns an odd number of pages,
1
The big comment just above says functions should be sorted.
2
then physmem / 8 will not be a multiple of the page size.
2
Add forward declarations as needed.
3
3
4
The following was observed on a gitlab runner:
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
6
ERROR qtest-arm/boot-serial-test - Bail out!
7
ERROR:../util/osdep.c:80:qemu_mprotect__osdep: \
8
assertion failed: (!(size & ~qemu_real_host_page_mask))
9
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
6
---
14
tcg/region.c | 47 +++++++++++++++++++++--------------------------
7
tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
15
1 file changed, 21 insertions(+), 26 deletions(-)
8
1 file changed, 59 insertions(+), 55 deletions(-)
16
9
17
diff --git a/tcg/region.c b/tcg/region.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
18
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
19
--- a/tcg/region.c
12
--- a/tcg/optimize.c
20
+++ b/tcg/region.c
13
+++ b/tcg/optimize.c
21
@@ -XXX,XX +XXX,XX @@ static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
22
(DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
15
* 3) those that produce information about the result value.
23
? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
16
*/
24
17
25
-static size_t size_code_gen_buffer(size_t tb_size)
18
+static bool fold_or(OptContext *ctx, TCGOp *op);
19
+static bool fold_orc(OptContext *ctx, TCGOp *op);
20
+static bool fold_xor(OptContext *ctx, TCGOp *op);
21
+
22
static bool fold_add(OptContext *ctx, TCGOp *op)
23
{
24
if (fold_const2_commutative(ctx, op) ||
25
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
26
return fold_masks_zs(ctx, op, z_mask, s_mask);
27
}
28
29
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
30
+{
31
+ /* If true and false values are the same, eliminate the cmp. */
32
+ if (args_are_copies(op->args[2], op->args[3])) {
33
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
34
+ }
35
+
36
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
37
+ uint64_t tv = arg_info(op->args[2])->val;
38
+ uint64_t fv = arg_info(op->args[3])->val;
39
+
40
+ if (tv == -1 && fv == 0) {
41
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
42
+ }
43
+ if (tv == 0 && fv == -1) {
44
+ if (TCG_TARGET_HAS_not_vec) {
45
+ op->opc = INDEX_op_not_vec;
46
+ return fold_not(ctx, op);
47
+ } else {
48
+ op->opc = INDEX_op_xor_vec;
49
+ op->args[2] = arg_new_constant(ctx, -1);
50
+ return fold_xor(ctx, op);
51
+ }
52
+ }
53
+ }
54
+ if (arg_is_const(op->args[2])) {
55
+ uint64_t tv = arg_info(op->args[2])->val;
56
+ if (tv == -1) {
57
+ op->opc = INDEX_op_or_vec;
58
+ op->args[2] = op->args[3];
59
+ return fold_or(ctx, op);
60
+ }
61
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
62
+ op->opc = INDEX_op_andc_vec;
63
+ op->args[2] = op->args[1];
64
+ op->args[1] = op->args[3];
65
+ return fold_andc(ctx, op);
66
+ }
67
+ }
68
+ if (arg_is_const(op->args[3])) {
69
+ uint64_t fv = arg_info(op->args[3])->val;
70
+ if (fv == 0) {
71
+ op->opc = INDEX_op_and_vec;
72
+ return fold_and(ctx, op);
73
+ }
74
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
75
+ op->opc = INDEX_op_orc_vec;
76
+ op->args[2] = op->args[1];
77
+ op->args[1] = op->args[3];
78
+ return fold_orc(ctx, op);
79
+ }
80
+ }
81
+ return finish_folding(ctx, op);
82
+}
83
+
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
85
{
86
int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
87
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
88
return fold_masks_zs(ctx, op, z_mask, s_mask);
89
}
90
91
-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
26
-{
92
-{
27
- /* Size the buffer. */
93
- /* If true and false values are the same, eliminate the cmp. */
28
- if (tb_size == 0) {
94
- if (args_are_copies(op->args[2], op->args[3])) {
29
- size_t phys_mem = qemu_get_host_physmem();
95
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
30
- if (phys_mem == 0) {
96
- }
31
- tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
97
-
32
- } else {
98
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
33
- tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8);
99
- uint64_t tv = arg_info(op->args[2])->val;
100
- uint64_t fv = arg_info(op->args[3])->val;
101
-
102
- if (tv == -1 && fv == 0) {
103
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
104
- }
105
- if (tv == 0 && fv == -1) {
106
- if (TCG_TARGET_HAS_not_vec) {
107
- op->opc = INDEX_op_not_vec;
108
- return fold_not(ctx, op);
109
- } else {
110
- op->opc = INDEX_op_xor_vec;
111
- op->args[2] = arg_new_constant(ctx, -1);
112
- return fold_xor(ctx, op);
113
- }
34
- }
114
- }
35
- }
115
- }
36
- if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
116
- if (arg_is_const(op->args[2])) {
37
- tb_size = MIN_CODE_GEN_BUFFER_SIZE;
117
- uint64_t tv = arg_info(op->args[2])->val;
118
- if (tv == -1) {
119
- op->opc = INDEX_op_or_vec;
120
- op->args[2] = op->args[3];
121
- return fold_or(ctx, op);
122
- }
123
- if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
124
- op->opc = INDEX_op_andc_vec;
125
- op->args[2] = op->args[1];
126
- op->args[1] = op->args[3];
127
- return fold_andc(ctx, op);
128
- }
38
- }
129
- }
39
- if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
130
- if (arg_is_const(op->args[3])) {
40
- tb_size = MAX_CODE_GEN_BUFFER_SIZE;
131
- uint64_t fv = arg_info(op->args[3])->val;
132
- if (fv == 0) {
133
- op->opc = INDEX_op_and_vec;
134
- return fold_and(ctx, op);
135
- }
136
- if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
137
- op->opc = INDEX_op_orc_vec;
138
- op->args[2] = op->args[1];
139
- op->args[1] = op->args[3];
140
- return fold_orc(ctx, op);
141
- }
41
- }
142
- }
42
- return tb_size;
143
- return finish_folding(ctx, op);
43
-}
144
-}
44
-
145
-
45
#ifdef __mips__
146
/* Propagate constants and copies, fold constant expressions. */
46
/*
147
void tcg_optimize(TCGContext *s)
47
* In order to use J and JAL within the code_gen_buffer, we require
48
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
49
*/
50
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
51
{
148
{
52
- size_t page_size;
53
+ const size_t page_size = qemu_real_host_page_size;
54
size_t region_size;
55
size_t i;
56
int have_prot;
57
58
- have_prot = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
59
- splitwx, &error_fatal);
60
+ /* Size the buffer. */
61
+ if (tb_size == 0) {
62
+ size_t phys_mem = qemu_get_host_physmem();
63
+ if (phys_mem == 0) {
64
+ tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
65
+ } else {
66
+ tb_size = QEMU_ALIGN_DOWN(phys_mem / 8, page_size);
67
+ tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, tb_size);
68
+ }
69
+ }
70
+ if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
71
+ tb_size = MIN_CODE_GEN_BUFFER_SIZE;
72
+ }
73
+ if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
74
+ tb_size = MAX_CODE_GEN_BUFFER_SIZE;
75
+ }
76
+
77
+ have_prot = alloc_code_gen_buffer(tb_size, splitwx, &error_fatal);
78
assert(have_prot >= 0);
79
80
/* Request large pages for the buffer and the splitwx. */
81
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
82
* As a result of this we might end up with a few extra pages at the end of
83
* the buffer; we will assign those to the last region.
84
*/
85
- region.n = tcg_n_regions(region.total_size, max_cpus);
86
- page_size = qemu_real_host_page_size;
87
- region_size = region.total_size / region.n;
88
+ region.n = tcg_n_regions(tb_size, max_cpus);
89
+ region_size = tb_size / region.n;
90
region_size = QEMU_ALIGN_DOWN(region_size, page_size);
91
92
/* A region must have at least 2 pages; one code, one guard */
93
--
149
--
94
2.25.1
150
2.43.0
95
96
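
The failure mode is purely arithmetic: phys_mem / 8 need not be a multiple of the page size, and the later per-page mprotect asserts on unaligned sizes. A small sketch of the corrected sizing, with the macros spelled out (illustrative only; the real code also clamps against the min/max buffer sizes):

    #include <stddef.h>

    #define ALIGN_DOWN(n, m) ((n) / (m) * (m))
    #define MIN(a, b)        ((a) < (b) ? (a) : (b))

    static size_t default_tb_size(size_t phys_mem, size_t page_size,
                                  size_t default_size)
    {
        if (phys_mem == 0) {
            return default_size;
        }
        /* Round 1/8 of physical memory down to a page multiple
           *before* clamping against the default. */
        return MIN(default_size, ALIGN_DOWN(phys_mem / 8, page_size));
    }
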
1
Shortly, the full code_gen_buffer will only be visible
1
The big comment just above says functions should be sorted.
2
to region.c, so move in_code_gen_buffer out-of-line.
3
2
4
Move the debugging versions of tcg_splitwx_to_{rx,rw}
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
to region.c as well, so that the compiler gets to see
6
the implementation of in_code_gen_buffer.
7
8
This leaves exactly one use of in_code_gen_buffer outside
9
of region.c, in cpu_restore_state, which, being on the
10
exception path, is not performance critical.
11
12
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
13
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
---
5
---
16
include/tcg/tcg.h | 11 +----------
6
tcg/optimize.c | 60 +++++++++++++++++++++++++-------------------------
17
tcg/region.c | 34 ++++++++++++++++++++++++++++++++++
7
1 file changed, 30 insertions(+), 30 deletions(-)
18
tcg/tcg.c | 23 -----------------------
19
3 files changed, 35 insertions(+), 33 deletions(-)
20
8
21
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
22
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
23
--- a/include/tcg/tcg.h
11
--- a/tcg/optimize.c
24
+++ b/include/tcg/tcg.h
12
+++ b/tcg/optimize.c
25
@@ -XXX,XX +XXX,XX @@ extern const void *tcg_code_gen_epilogue;
13
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
26
extern uintptr_t tcg_splitwx_diff;
14
return true;
27
extern TCGv_env cpu_env;
15
}
28
16
29
-static inline bool in_code_gen_buffer(const void *p)
17
+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
30
-{
31
- const TCGContext *s = &tcg_init_ctx;
32
- /*
33
- * Much like it is valid to have a pointer to the byte past the
34
- * end of an array (so long as you don't dereference it), allow
35
- * a pointer to the byte past the end of the code gen buffer.
36
- */
37
- return (size_t)(p - s->code_gen_buffer) <= s->code_gen_buffer_size;
38
-}
39
+bool in_code_gen_buffer(const void *p);
40
41
#ifdef CONFIG_DEBUG_TCG
42
const void *tcg_splitwx_to_rx(void *rw);
43
diff --git a/tcg/region.c b/tcg/region.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/tcg/region.c
46
+++ b/tcg/region.c
47
@@ -XXX,XX +XXX,XX @@ static struct tcg_region_state region;
48
static void *region_trees;
49
static size_t tree_size;
50
51
+bool in_code_gen_buffer(const void *p)
52
+{
18
+{
53
+ const TCGContext *s = &tcg_init_ctx;
19
+ /* Canonicalize the comparison to put immediate second. */
54
+ /*
20
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
55
+ * Much like it is valid to have a pointer to the byte past the
21
+ op->args[3] = tcg_swap_cond(op->args[3]);
56
+ * end of an array (so long as you don't dereference it), allow
22
+ }
57
+ * a pointer to the byte past the end of the code gen buffer.
23
+ return finish_folding(ctx, op);
58
+ */
59
+ return (size_t)(p - s->code_gen_buffer) <= s->code_gen_buffer_size;
60
+}
24
+}
61
+
25
+
62
+#ifdef CONFIG_DEBUG_TCG
26
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
63
+const void *tcg_splitwx_to_rx(void *rw)
64
+{
27
+{
65
+ /* Pass NULL pointers unchanged. */
28
+ /* If true and false values are the same, eliminate the cmp. */
66
+ if (rw) {
29
+ if (args_are_copies(op->args[3], op->args[4])) {
67
+ g_assert(in_code_gen_buffer(rw));
30
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
68
+ rw += tcg_splitwx_diff;
69
+ }
31
+ }
70
+ return rw;
32
+
33
+ /* Canonicalize the comparison to put immediate second. */
34
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
35
+ op->args[5] = tcg_swap_cond(op->args[5]);
36
+ }
37
+ /*
38
+ * Canonicalize the "false" input reg to match the destination,
39
+ * so that the tcg backend can implement "move if true".
40
+ */
41
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
42
+ op->args[5] = tcg_invert_cond(op->args[5]);
43
+ }
44
+ return finish_folding(ctx, op);
71
+}
45
+}
72
+
46
+
73
+void *tcg_splitwx_to_rw(const void *rx)
47
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
74
+{
75
+ /* Pass NULL pointers unchanged. */
76
+ if (rx) {
77
+ rx -= tcg_splitwx_diff;
78
+ /* Assert that we end with a pointer in the rw region. */
79
+ g_assert(in_code_gen_buffer(rx));
80
+ }
81
+ return (void *)rx;
82
+}
83
+#endif /* CONFIG_DEBUG_TCG */
84
+
85
/* compare a pointer @ptr and a tb_tc @s */
86
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
87
{
48
{
88
diff --git a/tcg/tcg.c b/tcg/tcg.c
49
uint64_t z_mask, s_mask;
89
index XXXXXXX..XXXXXXX 100644
50
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
90
--- a/tcg/tcg.c
51
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
91
+++ b/tcg/tcg.c
52
}
92
@@ -XXX,XX +XXX,XX @@ static const TCGTargetOpDef constraint_sets[] = {
53
93
54
-static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
94
#include "tcg-target.c.inc"
95
96
-#ifdef CONFIG_DEBUG_TCG
97
-const void *tcg_splitwx_to_rx(void *rw)
98
-{
55
-{
99
- /* Pass NULL pointers unchanged. */
56
- /* Canonicalize the comparison to put immediate second. */
100
- if (rw) {
57
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
101
- g_assert(in_code_gen_buffer(rw));
58
- op->args[3] = tcg_swap_cond(op->args[3]);
102
- rw += tcg_splitwx_diff;
103
- }
59
- }
104
- return rw;
60
- return finish_folding(ctx, op);
105
-}
61
-}
106
-
62
-
107
-void *tcg_splitwx_to_rw(const void *rx)
63
-static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
108
-{
64
-{
109
- /* Pass NULL pointers unchanged. */
65
- /* If true and false values are the same, eliminate the cmp. */
110
- if (rx) {
66
- if (args_are_copies(op->args[3], op->args[4])) {
111
- rx -= tcg_splitwx_diff;
67
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
112
- /* Assert that we end with a pointer in the rw region. */
113
- g_assert(in_code_gen_buffer(rx));
114
- }
68
- }
115
- return (void *)rx;
69
-
70
- /* Canonicalize the comparison to put immediate second. */
71
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
72
- op->args[5] = tcg_swap_cond(op->args[5]);
73
- }
74
- /*
75
- * Canonicalize the "false" input reg to match the destination,
76
- * so that the tcg backend can implement "move if true".
77
- */
78
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
79
- op->args[5] = tcg_invert_cond(op->args[5]);
80
- }
81
- return finish_folding(ctx, op);
116
-}
82
-}
117
-#endif /* CONFIG_DEBUG_TCG */
118
-
83
-
119
static void alloc_tcg_plugin_context(TCGContext *s)
84
static bool fold_sextract(OptContext *ctx, TCGOp *op)
120
{
85
{
121
#ifdef CONFIG_PLUGIN
86
uint64_t z_mask, s_mask, s_mask_old;
122
--
87
--
123
2.25.1
88
2.43.0
124
125
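
The helper relies on the usual single-comparison range check: after subtracting the base, one unsigned comparison covers both bounds, and using <= rather than < admits the one-past-the-end pointer. A standalone illustration of the same trick (not the moved function itself):

    #include <stdbool.h>
    #include <stddef.h>

    static bool in_buffer(const char *base, size_t size, const char *p)
    {
        /* If p is below base, the subtraction wraps to a huge unsigned
           value and the comparison fails; if p is past base + size,
           the difference exceeds size. */
        return (size_t)(p - base) <= size;
    }
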
1
Typo in the conversion to FloatParts64.
1
We currently have a flag, float_muladd_halve_result, to scale
2
2
the result by 2**-1. Extend this to handle arbitrary scaling.
3
Fixes: 572c4d862ff2
3
4
Fixes: Coverity CID 1457457
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Message-Id: <20210607223812.110596-1-richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
6
---
10
fpu/softfloat.c | 2 +-
7
include/fpu/softfloat.h | 6 ++++
11
1 file changed, 1 insertion(+), 1 deletion(-)
8
fpu/softfloat.c | 58 ++++++++++++++++++++++-----------------
12
9
fpu/softfloat-parts.c.inc | 7 +++--
10
3 files changed, 44 insertions(+), 27 deletions(-)
11
12
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/include/fpu/softfloat.h
15
+++ b/include/fpu/softfloat.h
16
@@ -XXX,XX +XXX,XX @@ float16 float16_add(float16, float16, float_status *status);
17
float16 float16_sub(float16, float16, float_status *status);
18
float16 float16_mul(float16, float16, float_status *status);
19
float16 float16_muladd(float16, float16, float16, int, float_status *status);
20
+float16 float16_muladd_scalbn(float16, float16, float16,
21
+ int, int, float_status *status);
22
float16 float16_div(float16, float16, float_status *status);
23
float16 float16_scalbn(float16, int, float_status *status);
24
float16 float16_min(float16, float16, float_status *status);
25
@@ -XXX,XX +XXX,XX @@ float32 float32_mul(float32, float32, float_status *status);
26
float32 float32_div(float32, float32, float_status *status);
27
float32 float32_rem(float32, float32, float_status *status);
28
float32 float32_muladd(float32, float32, float32, int, float_status *status);
29
+float32 float32_muladd_scalbn(float32, float32, float32,
30
+ int, int, float_status *status);
31
float32 float32_sqrt(float32, float_status *status);
32
float32 float32_exp2(float32, float_status *status);
33
float32 float32_log2(float32, float_status *status);
34
@@ -XXX,XX +XXX,XX @@ float64 float64_mul(float64, float64, float_status *status);
35
float64 float64_div(float64, float64, float_status *status);
36
float64 float64_rem(float64, float64, float_status *status);
37
float64 float64_muladd(float64, float64, float64, int, float_status *status);
38
+float64 float64_muladd_scalbn(float64, float64, float64,
39
+ int, int, float_status *status);
40
float64 float64_sqrt(float64, float_status *status);
41
float64 float64_log2(float64, float_status *status);
42
FloatRelation float64_compare(float64, float64, float_status *status);
13
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
43
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
14
index XXXXXXX..XXXXXXX 100644
44
index XXXXXXX..XXXXXXX 100644
15
--- a/fpu/softfloat.c
45
--- a/fpu/softfloat.c
16
+++ b/fpu/softfloat.c
46
+++ b/fpu/softfloat.c
47
@@ -XXX,XX +XXX,XX @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
48
#define parts_mul(A, B, S) \
49
PARTS_GENERIC_64_128(mul, A)(A, B, S)
50
51
-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
52
- FloatParts64 *c, int flags,
53
- float_status *s);
54
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
55
- FloatParts128 *c, int flags,
56
- float_status *s);
57
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
58
+ FloatParts64 *c, int scale,
59
+ int flags, float_status *s);
60
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
61
+ FloatParts128 *c, int scale,
62
+ int flags, float_status *s);
63
64
-#define parts_muladd(A, B, C, Z, S) \
65
- PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
66
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
67
+ PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)
68
69
static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
70
float_status *s);
71
@@ -XXX,XX +XXX,XX @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
72
* Fused multiply-add
73
*/
74
75
-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
76
- int flags, float_status *status)
77
+float16 QEMU_FLATTEN
78
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
79
+ int scale, int flags, float_status *status)
80
{
81
FloatParts64 pa, pb, pc, *pr;
82
83
float16_unpack_canonical(&pa, a, status);
84
float16_unpack_canonical(&pb, b, status);
85
float16_unpack_canonical(&pc, c, status);
86
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
87
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
88
89
return float16_round_pack_canonical(pr, status);
90
}
91
92
-static float32 QEMU_SOFTFLOAT_ATTR
93
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
94
- float_status *status)
95
+float16 float16_muladd(float16 a, float16 b, float16 c,
96
+ int flags, float_status *status)
97
+{
98
+ return float16_muladd_scalbn(a, b, c, 0, flags, status);
99
+}
100
+
101
+float32 QEMU_SOFTFLOAT_ATTR
102
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
103
+ int scale, int flags, float_status *status)
104
{
105
FloatParts64 pa, pb, pc, *pr;
106
107
float32_unpack_canonical(&pa, a, status);
108
float32_unpack_canonical(&pb, b, status);
109
float32_unpack_canonical(&pc, c, status);
110
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
111
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
112
113
return float32_round_pack_canonical(pr, status);
114
}
115
116
-static float64 QEMU_SOFTFLOAT_ATTR
117
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
118
- float_status *status)
119
+float64 QEMU_SOFTFLOAT_ATTR
120
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
121
+ int scale, int flags, float_status *status)
122
{
123
FloatParts64 pa, pb, pc, *pr;
124
125
float64_unpack_canonical(&pa, a, status);
126
float64_unpack_canonical(&pb, b, status);
127
float64_unpack_canonical(&pc, c, status);
128
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
129
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
130
131
return float64_round_pack_canonical(pr, status);
132
}
133
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
134
return ur.s;
135
136
soft:
137
- return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
138
+ return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
139
}
140
141
float64 QEMU_FLATTEN
142
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
143
return ur.s;
144
145
soft:
146
- return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
147
+ return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
148
}
149
150
float64 float64r32_muladd(float64 a, float64 b, float64 c,
151
@@ -XXX,XX +XXX,XX @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
152
float64_unpack_canonical(&pa, a, status);
153
float64_unpack_canonical(&pb, b, status);
154
float64_unpack_canonical(&pc, c, status);
155
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
156
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
157
158
return float64r32_round_pack_canonical(pr, status);
159
}
160
@@ -XXX,XX +XXX,XX @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
161
bfloat16_unpack_canonical(&pa, a, status);
162
bfloat16_unpack_canonical(&pb, b, status);
163
bfloat16_unpack_canonical(&pc, c, status);
164
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
165
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
166
167
return bfloat16_round_pack_canonical(pr, status);
168
}
169
@@ -XXX,XX +XXX,XX @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
170
float128_unpack_canonical(&pa, a, status);
171
float128_unpack_canonical(&pb, b, status);
172
float128_unpack_canonical(&pc, c, status);
173
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
174
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
175
176
return float128_round_pack_canonical(pr, status);
177
}
17
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
178
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
18
179
19
float_raise(float_flag_inexact, status);
180
float64_unpack_canonical(&rp, float64_one, status);
20
181
for (i = 0 ; i < 15 ; i++) {
21
- float64_unpack_canonical(&xnp, float64_ln2, status);
182
+
22
+ float64_unpack_canonical(&tp, float64_ln2, status);
183
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
23
xp = *parts_mul(&xp, &tp, status);
184
- rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
24
xnp = xp;
185
+ rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
25
186
xnp = *parts_mul(&xnp, &xp, status);
187
}
188
189
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
190
index XXXXXXX..XXXXXXX 100644
191
--- a/fpu/softfloat-parts.c.inc
192
+++ b/fpu/softfloat-parts.c.inc
193
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
194
* Requires A and C extracted into a double-sized structure to provide the
195
* extra space for the widening multiply.
196
*/
197
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
198
- FloatPartsN *c, int flags, float_status *s)
199
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
200
+ FloatPartsN *c, int scale,
201
+ int flags, float_status *s)
202
{
203
int ab_mask, abc_mask;
204
FloatPartsW p_widen, c_widen;
205
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
206
a->exp = p_widen.exp;
207
208
return_normal:
209
+ /* TODO: Replace all use of float_muladd_halve_result with scale. */
210
if (flags & float_muladd_halve_result) {
211
a->exp -= 1;
212
}
213
+ a->exp += scale;
214
finish_sign:
215
if (flags & float_muladd_negate_result) {
216
a->sign ^= 1;
26
--
217
--
27
2.25.1
218
2.43.0
28
219
29
220
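
Semantically the new entry points compute (a * b + c) * 2**scale with a single rounding, so float_muladd_halve_result becomes scale == -1. A rough host-float model for intuition only; it is approximate near overflow and subnormals, where ldexp() after fma() may round a second time:

    #include <math.h>
    #include <stdio.h>

    static float muladd_scalbn_model(float a, float b, float c, int scale)
    {
        return ldexpf(fmaf(a, b, c), scale);
    }

    int main(void)
    {
        /* halve_result: (2*3 + 4) / 2 == 5 */
        printf("%g\n", muladd_scalbn_model(2.0f, 3.0f, 4.0f, -1));
        return 0;
    }
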
1
All callers immediately assert on error, so move the assert
1
Use the scalbn interface instead of float_muladd_halve_result.
2
into the function itself.
3
2
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
5
---
9
tcg/tcg.c | 19 ++++++-------------
6
target/arm/tcg/helper-a64.c | 6 +++---
10
1 file changed, 6 insertions(+), 13 deletions(-)
7
1 file changed, 3 insertions(+), 3 deletions(-)
11
8
12
diff --git a/tcg/tcg.c b/tcg/tcg.c
9
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
13
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/tcg.c
11
--- a/target/arm/tcg/helper-a64.c
15
+++ b/tcg/tcg.c
12
+++ b/target/arm/tcg/helper-a64.c
16
@@ -XXX,XX +XXX,XX @@ static bool tcg_region_alloc(TCGContext *s)
13
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst)
17
* Perform a context's first region allocation.
14
(float16_is_infinity(b) && float16_is_zero(a))) {
18
* This function does _not_ increment region.agg_size_full.
15
return float16_one_point_five;
19
*/
16
}
20
-static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
17
- return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
21
+static void tcg_region_initial_alloc__locked(TCGContext *s)
18
+ return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);
22
{
23
- return tcg_region_alloc__locked(s);
24
+ bool err = tcg_region_alloc__locked(s);
25
+ g_assert(!err);
26
}
19
}
27
20
28
/* Call from a safe-work context */
21
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
29
@@ -XXX,XX +XXX,XX @@ void tcg_region_reset_all(void)
22
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
30
23
(float32_is_infinity(b) && float32_is_zero(a))) {
31
for (i = 0; i < n_ctxs; i++) {
24
return float32_one_point_five;
32
TCGContext *s = qatomic_read(&tcg_ctxs[i]);
33
- bool err = tcg_region_initial_alloc__locked(s);
34
-
35
- g_assert(!err);
36
+ tcg_region_initial_alloc__locked(s);
37
}
25
}
38
qemu_mutex_unlock(&region.lock);
26
- return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
39
27
+ return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst);
40
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(void)
41
42
/* In user-mode we support only one ctx, so do the initial allocation now */
43
#ifdef CONFIG_USER_ONLY
44
- {
45
- bool err = tcg_region_initial_alloc__locked(tcg_ctx);
46
-
47
- g_assert(!err);
48
- }
49
+ tcg_region_initial_alloc__locked(tcg_ctx);
50
#endif
51
}
28
}
52
29
53
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void)
30
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
54
MachineState *ms = MACHINE(qdev_get_machine());
31
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
55
TCGContext *s = g_malloc(sizeof(*s));
32
(float64_is_infinity(b) && float64_is_zero(a))) {
56
unsigned int i, n;
33
return float64_one_point_five;
57
- bool err;
34
}
58
35
- return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
59
*s = tcg_init_ctx;
36
+ return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
60
61
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void)
62
63
tcg_ctx = s;
64
qemu_mutex_lock(&region.lock);
65
- err = tcg_region_initial_alloc__locked(tcg_ctx);
66
- g_assert(!err);
67
+ tcg_region_initial_alloc__locked(s);
68
qemu_mutex_unlock(&region.lock);
69
}
37
}
70
#endif /* !CONFIG_USER_ONLY */
38
39
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
71
--
40
--
72
2.25.1
41
2.43.0
73
42
74
43
New patch
1
1
Use the scalbn interface instead of float_muladd_halve_result.
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/sparc/helper.h | 4 +-
7
target/sparc/fop_helper.c | 8 ++--
8
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
9
3 files changed, 54 insertions(+), 38 deletions(-)
10
11
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/sparc/helper.h
14
+++ b/target/sparc/helper.h
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
16
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
17
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
18
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
19
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
20
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
21
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
22
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
23
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
25
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
26
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
27
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
28
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
29
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
30
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
31
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
32
33
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/sparc/fop_helper.c
36
+++ b/target/sparc/fop_helper.c
37
@@ -XXX,XX +XXX,XX @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
38
}
39
40
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
41
- float32 s2, float32 s3, uint32_t op)
42
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
43
{
44
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
45
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
46
check_ieee_exceptions(env, GETPC());
47
return ret;
48
}
49
50
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
51
- float64 s2, float64 s3, uint32_t op)
52
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
53
{
54
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
55
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
56
check_ieee_exceptions(env, GETPC());
57
return ret;
58
}
59
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/sparc/translate.c
62
+++ b/target/sparc/translate.c
63
@@ -XXX,XX +XXX,XX @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
64
65
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
66
{
67
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
68
+ TCGv_i32 z = tcg_constant_i32(0);
69
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
70
}
71
72
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
73
{
74
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
75
+ TCGv_i32 z = tcg_constant_i32(0);
76
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
77
}
78
79
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
80
{
81
- int op = float_muladd_negate_c;
82
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
83
+ TCGv_i32 z = tcg_constant_i32(0);
84
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
85
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
86
}
87
88
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
89
{
90
- int op = float_muladd_negate_c;
91
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
92
+ TCGv_i32 z = tcg_constant_i32(0);
93
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
94
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
95
}
96
97
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
98
{
99
- int op = float_muladd_negate_c | float_muladd_negate_result;
100
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
101
+ TCGv_i32 z = tcg_constant_i32(0);
102
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
103
+ float_muladd_negate_result);
104
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
105
}
106
107
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
108
{
109
- int op = float_muladd_negate_c | float_muladd_negate_result;
110
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
111
+ TCGv_i32 z = tcg_constant_i32(0);
112
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
113
+ float_muladd_negate_result);
114
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
115
}
116
117
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
118
{
119
- int op = float_muladd_negate_result;
120
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
121
+ TCGv_i32 z = tcg_constant_i32(0);
122
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
123
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
124
}
125
126
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
127
{
128
- int op = float_muladd_negate_result;
129
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
130
+ TCGv_i32 z = tcg_constant_i32(0);
131
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
132
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
133
}
134
135
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
136
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
137
{
138
- TCGv_i32 one = tcg_constant_i32(float32_one);
139
- int op = float_muladd_halve_result;
140
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
141
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
142
+ TCGv_i32 mone = tcg_constant_i32(-1);
143
+ TCGv_i32 op = tcg_constant_i32(0);
144
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
145
}
146
147
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
148
{
149
- TCGv_i64 one = tcg_constant_i64(float64_one);
150
- int op = float_muladd_halve_result;
151
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
152
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
153
+ TCGv_i32 mone = tcg_constant_i32(-1);
154
+ TCGv_i32 op = tcg_constant_i32(0);
155
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
156
}
157
158
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
159
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
160
{
161
- TCGv_i32 one = tcg_constant_i32(float32_one);
162
- int op = float_muladd_negate_c | float_muladd_halve_result;
163
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
164
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
165
+ TCGv_i32 mone = tcg_constant_i32(-1);
166
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
167
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
168
}
169
170
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
171
{
172
- TCGv_i64 one = tcg_constant_i64(float64_one);
173
- int op = float_muladd_negate_c | float_muladd_halve_result;
174
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
175
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
176
+ TCGv_i32 mone = tcg_constant_i32(-1);
177
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
178
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
179
}
180
181
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
182
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
183
{
184
- TCGv_i32 one = tcg_constant_i32(float32_one);
185
- int op = float_muladd_negate_result | float_muladd_halve_result;
186
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
187
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
188
+ TCGv_i32 mone = tcg_constant_i32(-1);
189
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
190
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
191
}
192
193
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
194
{
195
- TCGv_i64 one = tcg_constant_i64(float64_one);
196
- int op = float_muladd_negate_result | float_muladd_halve_result;
197
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
198
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
199
+ TCGv_i32 mone = tcg_constant_i32(-1);
200
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
201
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
202
}
203
204
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
205
--
206
2.43.0
207
208
1
From: Luis Pires <luis.pires@eldorado.org.br>
1
All uses have been converted to float*_muladd_scalbn.
2
2
3
Signed-off-by: Luis Pires <luis.pires@eldorado.org.br>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Message-Id: <20210601125143.191165-1-luis.pires@eldorado.org.br>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
docs/devel/tcg.rst | 101 ++++++++++++++++++++++++++++++++++++++++-----
6
include/fpu/softfloat.h | 3 ---
8
1 file changed, 90 insertions(+), 11 deletions(-)
7
fpu/softfloat.c | 6 ------
8
fpu/softfloat-parts.c.inc | 4 ----
9
3 files changed, 13 deletions(-)
9
10
10
diff --git a/docs/devel/tcg.rst b/docs/devel/tcg.rst
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/docs/devel/tcg.rst
13
--- a/include/fpu/softfloat.h
13
+++ b/docs/devel/tcg.rst
14
+++ b/include/fpu/softfloat.h
14
@@ -XXX,XX +XXX,XX @@ performances.
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
15
QEMU's dynamic translation backend is called TCG, for "Tiny Code
16
| Using these differs from negating an input or output before calling
16
Generator". For more information, please take a look at ``tcg/README``.
17
| the muladd function in that this means that a NaN doesn't have its
17
18
| sign bit inverted before it is propagated.
18
-Some notable features of QEMU's dynamic translator are:
19
-| We also support halving the result before rounding, as a special
19
+The following sections outline some notable features and implementation
20
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
20
+details of QEMU's dynamic translator.
21
*----------------------------------------------------------------------------*/
21
22
enum {
22
CPU state optimisations
23
float_muladd_negate_c = 1,
23
-----------------------
24
float_muladd_negate_product = 2,
24
25
float_muladd_negate_result = 4,
25
-The target CPUs have many internal states which change the way it
26
- float_muladd_halve_result = 8,
26
-evaluates instructions. In order to achieve a good speed, the
27
};
27
+The target CPUs have many internal states which change the way they
28
28
+evaluate instructions. In order to achieve a good speed, the
29
/*----------------------------------------------------------------------------
29
translation phase considers that some state information of the virtual
30
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
30
CPU cannot change in it. The state is recorded in the Translation
31
index XXXXXXX..XXXXXXX 100644
31
Block (TB). If the state changes (e.g. privilege level), a new TB will
32
--- a/fpu/softfloat.c
32
@@ -XXX,XX +XXX,XX @@ Direct block chaining
33
+++ b/fpu/softfloat.c
33
---------------------
34
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
34
35
if (unlikely(!can_use_fpu(s))) {
35
After each translated basic block is executed, QEMU uses the simulated
36
goto soft;
36
-Program Counter (PC) and other cpu state information (such as the CS
37
}
37
+Program Counter (PC) and other CPU state information (such as the CS
38
- if (unlikely(flags & float_muladd_halve_result)) {
38
segment base value) to find the next basic block.
39
- goto soft;
39
40
- }
40
-In order to accelerate the most common cases where the new simulated PC
41
41
-is known, QEMU can patch a basic block so that it jumps directly to the
42
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
42
-next one.
43
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
43
+In its simplest, less optimized form, this is done by exiting from the
44
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
44
+current TB, going through the TB epilogue, and then back to the
45
if (unlikely(!can_use_fpu(s))) {
45
+main loop. That’s where QEMU looks for the next TB to execute,
46
goto soft;
46
+translating it from the guest architecture if it isn’t already available
47
}
47
+in memory. Then QEMU proceeds to execute this next TB, starting at the
48
- if (unlikely(flags & float_muladd_halve_result)) {
48
+prologue and then moving on to the translated instructions.
49
- goto soft;
49
50
- }
50
-The most portable code uses an indirect jump. An indirect jump makes
51
51
-it easier to make the jump target modification atomic. On some host
52
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
52
-architectures (such as x86 or PowerPC), the ``JUMP`` opcode is
53
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
53
-directly patched so that the block chaining has no overhead.
54
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
54
+Exiting from the TB this way will cause the ``cpu_exec_interrupt()``
55
index XXXXXXX..XXXXXXX 100644
55
+callback to be re-evaluated before executing additional instructions.
56
--- a/fpu/softfloat-parts.c.inc
56
+It is mandatory to exit this way after any CPU state changes that may
57
+++ b/fpu/softfloat-parts.c.inc
57
+unmask interrupts.
58
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
58
+
59
a->exp = p_widen.exp;
59
+In order to accelerate the cases where the TB for the new
60
60
+simulated PC is already available, QEMU has mechanisms that allow
61
return_normal:
61
+multiple TBs to be chained directly, without having to go back to the
62
- /* TODO: Replace all use of float_muladd_halve_result with scale. */
62
+main loop as described above. These mechanisms are:
63
- if (flags & float_muladd_halve_result) {
63
+
64
- a->exp -= 1;
64
+``lookup_and_goto_ptr``
65
- }
65
+^^^^^^^^^^^^^^^^^^^^^^^
66
a->exp += scale;
66
+
67
finish_sign:
67
+Calling ``tcg_gen_lookup_and_goto_ptr()`` will emit a call to
68
if (flags & float_muladd_negate_result) {
68
+``helper_lookup_tb_ptr``. This helper will look for an existing TB that
69
+matches the current CPU state. If the destination TB is available its
70
+code address is returned, otherwise the address of the JIT epilogue is
71
+returned. The call to the helper is always followed by the tcg ``goto_ptr``
72
+opcode, which branches to the returned address. In this way, we either
73
+branch to the next TB or return to the main loop.
74
+
75
+``goto_tb + exit_tb``
76
+^^^^^^^^^^^^^^^^^^^^^
77
+
78
+The translation code usually implements branching by performing the
79
+following steps:
80
+
81
+1. Call ``tcg_gen_goto_tb()`` passing a jump slot index (either 0 or 1)
82
+ as a parameter.
83
+
84
+2. Emit TCG instructions to update the CPU state with any information
85
+ that has been assumed constant and is required by the main loop to
86
+ correctly locate and execute the next TB. For most guests, this is
87
+ just the PC of the branch destination, but others may store additional
88
+ data. The information updated in this step must be inferable from both
89
+ ``cpu_get_tb_cpu_state()`` and ``cpu_restore_state()``.
90
+
91
+3. Call ``tcg_gen_exit_tb()`` passing the address of the current TB and
92
+ the jump slot index again.
93
+
94
+Step 1, ``tcg_gen_goto_tb()``, will emit a ``goto_tb`` TCG
95
+instruction that later on gets translated to a jump to an address
96
+associated with the specified jump slot. Initially, this is the address
97
+of step 2's instructions, which update the CPU state information. Step 3,
98
+``tcg_gen_exit_tb()``, exits from the current TB returning a tagged
99
+pointer composed of the last executed TB’s address and the jump slot
100
+index.
101
+
102
+The first time this whole sequence is executed, step 1 simply jumps
103
+to step 2. Then the CPU state information gets updated and we exit from
104
+the current TB. As a result, the behavior is very similar to the less
105
+optimized form described earlier in this section.
106
+
107
+Next, the main loop looks for the next TB to execute using the
108
+current CPU state information (creating the TB if it wasn’t already
109
+available) and, before starting to execute the new TB’s instructions,
110
+patches the previously executed TB by associating one of its jump
111
+slots (the one specified in the call to ``tcg_gen_exit_tb()``) with the
112
+address of the new TB.
113
+
114
+The next time this previous TB is executed and we get to that same
115
+``goto_tb`` step, it will already be patched (assuming the destination TB
116
+is still in memory) and will jump directly to the first instruction of
117
+the destination TB, without going back to the main loop.
118
+
119
+For the ``goto_tb + exit_tb`` mechanism to be used, the following
120
+conditions need to be satisfied:
121
+
122
+* The change in CPU state must be constant, e.g., a direct branch and
123
+ not an indirect branch.
124
+
125
+* The direct branch cannot cross a page boundary. Memory mappings
126
+ may change, causing the code at the destination address to change.
127
+
128
+Note that, on step 3 (``tcg_gen_exit_tb()``), in addition to the
129
+jump slot index, the address of the TB just executed is also returned.
130
+This address corresponds to the TB that will be patched; it may be
131
+different than the one that was directly executed from the main loop
132
+if the latter had already been chained to other TBs.
133
134
Self-modifying code and translated code invalidation
135
----------------------------------------------------
136
--
69
--
137
2.25.1
70
2.43.0
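As a side note to the documentation above, here is a minimal sketch (not from
either patch) of the shape most target translators use when emitting a direct
branch with the goto_tb + exit_tb pair, falling back to lookup_and_goto_ptr
when direct chaining is not allowed; use_goto_tb() and gen_update_pc() are
placeholder names for target-specific helpers.

    static void gen_direct_branch(DisasContext *dc, int slot, target_ulong dest)
    {
        if (use_goto_tb(dc, dest)) {            /* e.g. same-page direct branch */
            tcg_gen_goto_tb(slot);              /* step 1: patchable jump */
            gen_update_pc(dc, dest);            /* step 2: update assumed-constant state */
            tcg_gen_exit_tb(dc->base.tb, slot); /* step 3: tagged exit to the main loop */
        } else {
            gen_update_pc(dc, dest);
            tcg_gen_lookup_and_goto_ptr();      /* chain via helper_lookup_tb_ptr */
        }
    }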
138
71
139
72
1
At some point during the development of tcg_constant_*, I changed
1
This rounding mode is used by Hexagon.
2
my mind about whether such temps should be able to be passed to
3
tcg_temp_free_*. The final version committed allows this, but the
4
commentary was not updated to match.
5
2
6
Fixes: c0522136adf
7
Reported-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
4
---
12
include/tcg/tcg.h | 3 ++-
5
include/fpu/softfloat-types.h | 2 ++
13
1 file changed, 2 insertions(+), 1 deletion(-)
6
fpu/softfloat-parts.c.inc | 3 +++
7
2 files changed, 5 insertions(+)
14
8
15
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
9
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
16
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
17
--- a/include/tcg/tcg.h
11
--- a/include/fpu/softfloat-types.h
18
+++ b/include/tcg/tcg.h
12
+++ b/include/fpu/softfloat-types.h
19
@@ -XXX,XX +XXX,XX @@ TCGv_vec tcg_const_ones_vec_matching(TCGv_vec);
13
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
14
float_round_to_odd = 5,
15
/* Not an IEEE rounding mode: round to closest odd, overflow to inf */
16
float_round_to_odd_inf = 6,
17
+ /* Not an IEEE rounding mode: round to nearest even, overflow to max */
18
+ float_round_nearest_even_max = 7,
19
} FloatRoundMode;
20
20
21
/*
21
/*
22
* Locate or create a read-only temporary that is a constant.
22
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
23
- * This kind of temporary need not and should not be freed.
23
index XXXXXXX..XXXXXXX 100644
24
+ * This kind of temporary need not be freed, but for convenience
24
--- a/fpu/softfloat-parts.c.inc
25
+ * will be silently ignored by tcg_temp_free_*.
25
+++ b/fpu/softfloat-parts.c.inc
26
*/
26
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
27
TCGTemp *tcg_constant_internal(TCGType type, int64_t val);
27
int exp, flags = 0;
28
28
29
switch (s->float_rounding_mode) {
30
+ case float_round_nearest_even_max:
31
+ overflow_norm = true;
32
+ /* fall through */
33
case float_round_nearest_even:
34
if (N > 64 && frac_lsb == 0) {
35
inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
29
--
36
--
30
2.25.1
37
2.43.0
31
32
New patch
1
Certain Hexagon instructions suppress changes to the result
2
when the product of fma() is a true zero.
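As a concrete illustration (not part of the patch) of the intended semantics:
with the flag set, a true-zero product leaves the addend untouched, so a
negative-zero addend keeps its sign instead of being folded to +0 by the usual
zero-addition rule.

    /* Sketch; x is an assumed finite non-zero float32 and s an assumed
     * float_status pointer using round-to-nearest-even. */
    float32 c = make_float32(0x80000000);           /* -0.0 */
    float32 folded = float32_muladd(float32_zero, x, c, 0, s);
    /* folded == +0.0: (+0 * x) + (-0) folds to +0 */
    float32 kept = float32_muladd(float32_zero, x, c,
                                  float_muladd_suppress_add_product_zero, s);
    /* kept == -0.0: C is returned without performing the addition */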
1
3
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/fpu/softfloat.h | 5 +++++
7
fpu/softfloat.c | 3 +++
8
fpu/softfloat-parts.c.inc | 4 +++-
9
3 files changed, 11 insertions(+), 1 deletion(-)
10
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/include/fpu/softfloat.h
14
+++ b/include/fpu/softfloat.h
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
16
| Using these differs from negating an input or output before calling
17
| the muladd function in that this means that a NaN doesn't have its
18
| sign bit inverted before it is propagated.
19
+|
20
+| With float_muladd_suppress_add_product_zero, if A or B is zero
21
+| such that the product is a true zero, then return C without addition.
22
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
23
*----------------------------------------------------------------------------*/
24
enum {
25
float_muladd_negate_c = 1,
26
float_muladd_negate_product = 2,
27
float_muladd_negate_result = 4,
28
+ float_muladd_suppress_add_product_zero = 8,
29
};
30
31
/*----------------------------------------------------------------------------
32
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/fpu/softfloat.c
35
+++ b/fpu/softfloat.c
36
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
37
if (unlikely(!can_use_fpu(s))) {
38
goto soft;
39
}
40
+ if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
41
+ goto soft;
42
+ }
43
44
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
45
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
46
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
47
index XXXXXXX..XXXXXXX 100644
48
--- a/fpu/softfloat-parts.c.inc
49
+++ b/fpu/softfloat-parts.c.inc
50
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
51
goto return_normal;
52
}
53
if (c->cls == float_class_zero) {
54
- if (a->sign != c->sign) {
55
+ if (flags & float_muladd_suppress_add_product_zero) {
56
+ a->sign = c->sign;
57
+ } else if (a->sign != c->sign) {
58
goto return_sub_zero;
59
}
60
goto return_zero;
61
--
62
2.43.0
1
It consists of one function call and has only one caller.
1
There are no special cases for this instruction.
2
Remove internal_mpyf as unused.
2
3
3
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
accel/tcg/translate-all.c | 7 +------
7
target/hexagon/fma_emu.h | 1 -
9
1 file changed, 1 insertion(+), 6 deletions(-)
8
target/hexagon/fma_emu.c | 8 --------
9
target/hexagon/op_helper.c | 2 +-
10
3 files changed, 1 insertion(+), 10 deletions(-)
10
11
11
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
12
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
12
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/translate-all.c
14
--- a/target/hexagon/fma_emu.h
14
+++ b/accel/tcg/translate-all.c
15
+++ b/target/hexagon/fma_emu.h
15
@@ -XXX,XX +XXX,XX @@ static void page_table_config_init(void)
16
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32);
16
assert(v_l2_levels >= 0);
17
float32 infinite_float32(uint8_t sign);
18
float32 internal_fmafx(float32 a, float32 b, float32 c,
19
int scale, float_status *fp_status);
20
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
21
float64 internal_mpyhh(float64 a, float64 b,
22
unsigned long long int accumulated,
23
float_status *fp_status);
24
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/hexagon/fma_emu.c
27
+++ b/target/hexagon/fma_emu.c
28
@@ -XXX,XX +XXX,XX @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
29
return accum_round_float32(result, fp_status);
17
}
30
}
18
31
19
-static void cpu_gen_init(void)
32
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
20
-{
33
-{
21
- tcg_context_init(&tcg_init_ctx);
34
- if (float32_is_zero(a) || float32_is_zero(b)) {
35
- return float32_mul(a, b, fp_status);
36
- }
37
- return internal_fmafx(a, b, float32_zero, 0, fp_status);
22
-}
38
-}
23
-
39
-
24
/* Encode VAL as a signed leb128 sequence at P.
40
float64 internal_mpyhh(float64 a, float64 b,
25
Return P incremented past the encoded value. */
41
unsigned long long int accumulated,
26
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
42
float_status *fp_status)
27
@@ -XXX,XX +XXX,XX @@ void tcg_exec_init(unsigned long tb_size, int splitwx)
43
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
28
bool ok;
44
index XXXXXXX..XXXXXXX 100644
29
45
--- a/target/hexagon/op_helper.c
30
tcg_allowed = true;
46
+++ b/target/hexagon/op_helper.c
31
- cpu_gen_init();
47
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
32
+ tcg_context_init(&tcg_init_ctx);
48
{
33
page_init();
49
float32 RdV;
34
tb_htable_init();
50
arch_fpop_start(env);
35
51
- RdV = internal_mpyf(RsV, RtV, &env->fp_status);
52
+ RdV = float32_mul(RsV, RtV, &env->fp_status);
53
arch_fpop_end(env);
54
return RdV;
55
}
36
--
56
--
37
2.25.1
57
2.43.0
38
39
diff view generated by jsdifflib
New patch
1
There are no special cases for this instruction.
1
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/hexagon/op_helper.c | 2 +-
7
1 file changed, 1 insertion(+), 1 deletion(-)
8
9
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/hexagon/op_helper.c
12
+++ b/target/hexagon/op_helper.c
13
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
14
float32 RsV, float32 RtV)
15
{
16
arch_fpop_start(env);
17
- RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
18
+ RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
19
arch_fpop_end(env);
20
return RxV;
21
}
22
--
23
2.43.0
New patch
1
There are no special cases for this instruction.  Since Hexagon
2
always uses default-nan mode, explicitly negating the first
3
input is unnecessary. Use float_muladd_negate_product instead.
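For clarity, an illustrative comparison of the two call shapes (names follow
the helper; not part of the patch):

    float32 neg_RsV = float32_set_sign(RsV, !float32_is_neg(RsV));
    float32 r_old = float32_muladd(neg_RsV, RtV, RxV, 0, &env->fp_status);
    float32 r_new = float32_muladd(RsV, RtV, RxV,
                                   float_muladd_negate_product,
                                   &env->fp_status);
    /*
     * For non-NaN inputs, (-Rs) * Rt + Rx and -(Rs * Rt) + Rx are the same.
     * They could differ only in the sign of a propagated NaN, and with
     * default-NaN mode enabled (as Hexagon uses) both return the default
     * NaN in that case, so the two forms are interchangeable here.
     */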
1
4
5
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/hexagon/op_helper.c | 5 ++---
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
11
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/hexagon/op_helper.c
14
+++ b/target/hexagon/op_helper.c
15
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
16
float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
17
float32 RsV, float32 RtV)
18
{
19
- float32 neg_RsV;
20
arch_fpop_start(env);
21
- neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
22
- RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
23
+ RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
24
+ &env->fp_status);
25
arch_fpop_end(env);
26
return RxV;
27
}
28
--
29
2.43.0
1
From: "Jose R. Ziviani" <jziviani@suse.de>
1
This instruction has a special case that 0 * x + c returns c
2
without the normal sign folding that comes with 0 + -0.
3
Use the new float_muladd_suppress_add_product_zero to
4
describe this.
2
5
3
Commit 5e8892db93 fixed several function signatures but tcg_out_op for
6
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
arm was missed.  This patch fixes it as well.
5
6
Signed-off-by: Jose R. Ziviani <jziviani@suse.de>
7
Message-Id: <20210610224450.23425-1-jziviani@suse.de>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
8
---
10
tcg/arm/tcg-target.c.inc | 3 ++-
9
target/hexagon/op_helper.c | 11 +++--------
11
1 file changed, 2 insertions(+), 1 deletion(-)
10
1 file changed, 3 insertions(+), 8 deletions(-)
12
11
13
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
12
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
14
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/arm/tcg-target.c.inc
14
--- a/target/hexagon/op_helper.c
16
+++ b/tcg/arm/tcg-target.c.inc
15
+++ b/target/hexagon/op_helper.c
17
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
16
@@ -XXX,XX +XXX,XX @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
18
static void tcg_out_epilogue(TCGContext *s);
17
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
19
18
float32 RsV, float32 RtV, float32 PuV)
20
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
21
- const TCGArg *args, const int *const_args)
22
+ const TCGArg args[TCG_MAX_OP_ARGS],
23
+ const int const_args[TCG_MAX_OP_ARGS])
24
{
19
{
25
TCGArg a0, a1, a2, a3, a4, a5;
20
- size4s_t tmp;
26
int c;
21
arch_fpop_start(env);
22
- RxV = check_nan(RxV, RxV, &env->fp_status);
23
- RxV = check_nan(RxV, RsV, &env->fp_status);
24
- RxV = check_nan(RxV, RtV, &env->fp_status);
25
- tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
26
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
27
- RxV = tmp;
28
- }
29
+ RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
30
+ float_muladd_suppress_add_product_zero,
31
+ &env->fp_status);
32
arch_fpop_end(env);
33
return RxV;
34
}
27
--
35
--
28
2.25.1
36
2.43.0
29
30
1
Start removing the include of hw/boards.h from tcg/.
1
There are multiple special cases for this instruction.
2
Pass down the max_cpus value from tcg_init_machine,
2
(1) Saturating to the normal maximum instead of overflowing to infinity is
3
where we have the MachineState already.
3
handled by the new float_round_nearest_even_max rounding mode.
4
(2) The 0 * n + c special case is handled by the new
5
float_muladd_suppress_add_product_zero flag.
6
(3) The Inf - Inf -> 0 special case can be detected after the fact
7
by examining float_flag_invalid_isi.
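Condensed, the helper added in the diff below maps those three cases onto
softfloat roughly like this (sketch only; names as in the new do_sffma_lib):

    set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status); /* (1) */
    RxV = float32_muladd(RsV, RtV, RxV,
                         negate | float_muladd_suppress_add_product_zero,   /* (2) */
                         &env->fp_status);
    if (get_float_exception_flags(&env->fp_status) & float_flag_invalid_isi) {
        RxV = 0;                                    /* (3) Inf - Inf -> 0 */
    }
    set_float_exception_flags(0, &env->fp_status);  /* flags are suppressed */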
4
8
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
9
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
11
---
10
include/tcg/tcg.h | 2 +-
12
target/hexagon/op_helper.c | 105 +++++++++----------------------------
11
tcg/tcg-internal.h | 2 +-
13
1 file changed, 26 insertions(+), 79 deletions(-)
12
accel/tcg/tcg-all.c | 10 +++++++++-
13
tcg/region.c | 32 +++++++++++---------------------
14
tcg/tcg.c | 10 ++++------
15
5 files changed, 26 insertions(+), 30 deletions(-)
16
14
17
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
15
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
18
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
19
--- a/include/tcg/tcg.h
17
--- a/target/hexagon/op_helper.c
20
+++ b/include/tcg/tcg.h
18
+++ b/target/hexagon/op_helper.c
21
@@ -XXX,XX +XXX,XX @@ static inline void *tcg_malloc(int size)
19
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
22
}
20
return RxV;
23
}
21
}
24
22
25
-void tcg_init(size_t tb_size, int splitwx);
23
-static bool is_zero_prod(float32 a, float32 b)
26
+void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus);
24
-{
27
void tcg_register_thread(void);
25
- return ((float32_is_zero(a) && is_finite(b)) ||
28
void tcg_prologue_init(TCGContext *s);
26
- (float32_is_zero(b) && is_finite(a)));
29
void tcg_func_start(TCGContext *s);
27
-}
30
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
28
-
31
index XXXXXXX..XXXXXXX 100644
29
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
32
--- a/tcg/tcg-internal.h
30
-{
33
+++ b/tcg/tcg-internal.h
31
- float32 ret = dst;
34
@@ -XXX,XX +XXX,XX @@
32
- if (float32_is_any_nan(x)) {
35
extern TCGContext **tcg_ctxs;
33
- if (extract32(x, 22, 1) == 0) {
36
extern unsigned int n_tcg_ctxs;
34
- float_raise(float_flag_invalid, fp_status);
37
35
- }
38
-void tcg_region_init(size_t tb_size, int splitwx);
36
- ret = make_float32(0xffffffff); /* nan */
39
+void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus);
37
- }
40
bool tcg_region_alloc(TCGContext *s);
38
- return ret;
41
void tcg_region_initial_alloc(TCGContext *s);
39
-}
42
void tcg_region_prologue_set(TCGContext *s);
40
-
43
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
41
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
44
index XXXXXXX..XXXXXXX 100644
42
float32 RsV, float32 RtV, float32 PuV)
45
--- a/accel/tcg/tcg-all.c
46
+++ b/accel/tcg/tcg-all.c
47
@@ -XXX,XX +XXX,XX @@
48
#include "qemu/accel.h"
49
#include "qapi/qapi-builtin-visit.h"
50
#include "qemu/units.h"
51
+#if !defined(CONFIG_USER_ONLY)
52
+#include "hw/boards.h"
53
+#endif
54
#include "internal.h"
55
56
struct TCGState {
57
@@ -XXX,XX +XXX,XX @@ bool mttcg_enabled;
58
static int tcg_init_machine(MachineState *ms)
59
{
43
{
60
TCGState *s = TCG_STATE(current_accel());
44
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
61
+#ifdef CONFIG_USER_ONLY
45
return RxV;
62
+ unsigned max_cpus = 1;
63
+#else
64
+ unsigned max_cpus = ms->smp.max_cpus;
65
+#endif
66
67
tcg_allowed = true;
68
mttcg_enabled = s->mttcg_enabled;
69
70
page_init();
71
tb_htable_init();
72
- tcg_init(s->tb_size * MiB, s->splitwx_enabled);
73
+ tcg_init(s->tb_size * MiB, s->splitwx_enabled, max_cpus);
74
75
#if defined(CONFIG_SOFTMMU)
76
/*
77
diff --git a/tcg/region.c b/tcg/region.c
78
index XXXXXXX..XXXXXXX 100644
79
--- a/tcg/region.c
80
+++ b/tcg/region.c
81
@@ -XXX,XX +XXX,XX @@
82
#include "qapi/error.h"
83
#include "exec/exec-all.h"
84
#include "tcg/tcg.h"
85
-#if !defined(CONFIG_USER_ONLY)
86
-#include "hw/boards.h"
87
-#endif
88
#include "tcg-internal.h"
89
90
91
@@ -XXX,XX +XXX,XX @@ void tcg_region_reset_all(void)
92
tcg_region_tree_reset_all();
93
}
46
}
94
47
95
+static size_t tcg_n_regions(unsigned max_cpus)
48
-static bool is_inf_prod(int32_t a, int32_t b)
96
+{
49
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
97
#ifdef CONFIG_USER_ONLY
50
+ float32 RsV, float32 RtV, int negate)
98
-static size_t tcg_n_regions(void)
99
-{
100
return 1;
101
-}
102
#else
103
-/*
104
- * It is likely that some vCPUs will translate more code than others, so we
105
- * first try to set more regions than max_cpus, with those regions being of
106
- * reasonable size. If that's not possible we make do by evenly dividing
107
- * the code_gen_buffer among the vCPUs.
108
- */
109
-static size_t tcg_n_regions(void)
110
-{
111
+ /*
112
+ * It is likely that some vCPUs will translate more code than others,
113
+ * so we first try to set more regions than max_cpus, with those regions
114
+ * being of reasonable size. If that's not possible we make do by evenly
115
+ * dividing the code_gen_buffer among the vCPUs.
116
+ */
117
size_t i;
118
119
/* Use a single region if all we have is one vCPU thread */
120
-#if !defined(CONFIG_USER_ONLY)
121
- MachineState *ms = MACHINE(qdev_get_machine());
122
- unsigned int max_cpus = ms->smp.max_cpus;
123
-#endif
124
if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
125
return 1;
126
}
127
@@ -XXX,XX +XXX,XX @@ static size_t tcg_n_regions(void)
128
}
129
/* If we can't, then just allocate one region per vCPU thread */
130
return max_cpus;
131
-}
132
#endif
133
+}
134
135
/*
136
* Minimum size of the code gen buffer. This number is randomly chosen,
137
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
138
* in practice. Multi-threaded guests share most if not all of their translated
139
* code, which makes parallel code generation less appealing than in softmmu.
140
*/
141
-void tcg_region_init(size_t tb_size, int splitwx)
142
+void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
143
{
51
{
144
void *buf, *aligned;
52
- return (float32_is_infinity(a) && float32_is_infinity(b)) ||
145
size_t size;
53
- (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
146
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx)
54
- (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
147
buf = tcg_init_ctx.code_gen_buffer;
55
+ int flags;
148
size = tcg_init_ctx.code_gen_buffer_size;
56
+
149
page_size = qemu_real_host_page_size;
57
+ arch_fpop_start(env);
150
- n_regions = tcg_n_regions();
58
+
151
+ n_regions = tcg_n_regions(max_cpus);
59
+ set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
152
60
+ RxV = float32_muladd(RsV, RtV, RxV,
153
/* The first region will be 'aligned - buf' bytes larger than the others */
61
+ negate | float_muladd_suppress_add_product_zero,
154
aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
62
+ &env->fp_status);
155
diff --git a/tcg/tcg.c b/tcg/tcg.c
63
+
156
index XXXXXXX..XXXXXXX 100644
64
+ flags = get_float_exception_flags(&env->fp_status);
157
--- a/tcg/tcg.c
65
+ if (flags) {
158
+++ b/tcg/tcg.c
66
+ /* Flags are suppressed by this instruction. */
159
@@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s);
67
+ set_float_exception_flags(0, &env->fp_status);
160
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
68
+
161
TCGReg reg, const char *name);
69
+ /* Return 0 for Inf - Inf. */
162
70
+ if (flags & float_flag_invalid_isi) {
163
-static void tcg_context_init(void)
71
+ RxV = 0;
164
+static void tcg_context_init(unsigned max_cpus)
72
+ }
73
+ }
74
+
75
+ arch_fpop_end(env);
76
+ return RxV;
77
}
78
79
float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
80
float32 RsV, float32 RtV)
165
{
81
{
166
TCGContext *s = &tcg_init_ctx;
82
- bool infinp;
167
int op, total_args, n, i;
83
- bool infminusinf;
168
@@ -XXX,XX +XXX,XX @@ static void tcg_context_init(void)
84
- float32 tmp;
169
tcg_ctxs = &tcg_ctx;
85
-
170
n_tcg_ctxs = 1;
86
- arch_fpop_start(env);
171
#else
87
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
172
- MachineState *ms = MACHINE(qdev_get_machine());
88
- infminusinf = float32_is_infinity(RxV) &&
173
- unsigned int max_cpus = ms->smp.max_cpus;
89
- is_inf_prod(RsV, RtV) &&
174
tcg_ctxs = g_new(TCGContext *, max_cpus);
90
- (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
175
#endif
91
- infinp = float32_is_infinity(RxV) ||
176
92
- float32_is_infinity(RtV) ||
177
@@ -XXX,XX +XXX,XX @@ static void tcg_context_init(void)
93
- float32_is_infinity(RsV);
178
cpu_env = temp_tcgv_ptr(ts);
94
- RxV = check_nan(RxV, RxV, &env->fp_status);
95
- RxV = check_nan(RxV, RsV, &env->fp_status);
96
- RxV = check_nan(RxV, RtV, &env->fp_status);
97
- tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
98
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
99
- RxV = tmp;
100
- }
101
- set_float_exception_flags(0, &env->fp_status);
102
- if (float32_is_infinity(RxV) && !infinp) {
103
- RxV = RxV - 1;
104
- }
105
- if (infminusinf) {
106
- RxV = 0;
107
- }
108
- arch_fpop_end(env);
109
- return RxV;
110
+ return do_sffma_lib(env, RxV, RsV, RtV, 0);
179
}
111
}
180
112
181
-void tcg_init(size_t tb_size, int splitwx)
113
float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
182
+void tcg_init(size_t tb_size, int splitwx, unsigned max_cpus)
114
float32 RsV, float32 RtV)
183
{
115
{
184
- tcg_context_init();
116
- bool infinp;
185
- tcg_region_init(tb_size, splitwx);
117
- bool infminusinf;
186
+ tcg_context_init(max_cpus);
118
- float32 tmp;
187
+ tcg_region_init(tb_size, splitwx, max_cpus);
119
-
120
- arch_fpop_start(env);
121
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
122
- infminusinf = float32_is_infinity(RxV) &&
123
- is_inf_prod(RsV, RtV) &&
124
- (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
125
- infinp = float32_is_infinity(RxV) ||
126
- float32_is_infinity(RtV) ||
127
- float32_is_infinity(RsV);
128
- RxV = check_nan(RxV, RxV, &env->fp_status);
129
- RxV = check_nan(RxV, RsV, &env->fp_status);
130
- RxV = check_nan(RxV, RtV, &env->fp_status);
131
- float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
132
- tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
133
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
134
- RxV = tmp;
135
- }
136
- set_float_exception_flags(0, &env->fp_status);
137
- if (float32_is_infinity(RxV) && !infinp) {
138
- RxV = RxV - 1;
139
- }
140
- if (infminusinf) {
141
- RxV = 0;
142
- }
143
- arch_fpop_end(env);
144
- return RxV;
145
+ return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
188
}
146
}
189
147
190
/*
148
float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
191
--
149
--
192
2.25.1
150
2.43.0
193
194
1
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
1
The function is now unused.
2
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
2
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
accel/tcg/tcg-all.c | 3 ++-
6
target/hexagon/fma_emu.h | 2 -
7
1 file changed, 2 insertions(+), 1 deletion(-)
7
target/hexagon/fma_emu.c | 171 ---------------------------------------
8
2 files changed, 173 deletions(-)
8
9
9
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
10
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
10
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
11
--- a/accel/tcg/tcg-all.c
12
--- a/target/hexagon/fma_emu.h
12
+++ b/accel/tcg/tcg-all.c
13
+++ b/target/hexagon/fma_emu.h
13
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float32_getexp_raw(float32 f32)
14
#include "qemu/error-report.h"
15
}
15
#include "qemu/accel.h"
16
int32_t float32_getexp(float32 f32);
16
#include "qapi/qapi-builtin-visit.h"
17
float32 infinite_float32(uint8_t sign);
17
+#include "qemu/units.h"
18
-float32 internal_fmafx(float32 a, float32 b, float32 c,
18
#include "internal.h"
19
- int scale, float_status *fp_status);
19
20
float64 internal_mpyhh(float64 a, float64 b,
20
struct TCGState {
21
unsigned long long int accumulated,
21
@@ -XXX,XX +XXX,XX @@ static int tcg_init_machine(MachineState *ms)
22
float_status *fp_status);
22
23
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
23
page_init();
24
index XXXXXXX..XXXXXXX 100644
24
tb_htable_init();
25
--- a/target/hexagon/fma_emu.c
25
- tcg_init(s->tb_size * 1024 * 1024, s->splitwx_enabled);
26
+++ b/target/hexagon/fma_emu.c
26
+ tcg_init(s->tb_size * MiB, s->splitwx_enabled);
27
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
27
28
return -1;
28
#if defined(CONFIG_SOFTMMU)
29
}
29
/*
30
31
-static uint64_t float32_getmant(float32 f32)
32
-{
33
- Float a = { .i = f32 };
34
- if (float32_is_normal(f32)) {
35
- return a.mant | 1ULL << 23;
36
- }
37
- if (float32_is_zero(f32)) {
38
- return 0;
39
- }
40
- if (float32_is_denormal(f32)) {
41
- return a.mant;
42
- }
43
- return ~0ULL;
44
-}
45
-
46
int32_t float32_getexp(float32 f32)
47
{
48
Float a = { .i = f32 };
49
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
50
}
51
52
/* Return a maximum finite value with the requested sign */
53
-static float32 maxfinite_float32(uint8_t sign)
54
-{
55
- if (sign) {
56
- return make_float32(SF_MINUS_MAXF);
57
- } else {
58
- return make_float32(SF_MAXF);
59
- }
60
-}
61
-
62
-/* Return a zero value with requested sign */
63
-static float32 zero_float32(uint8_t sign)
64
-{
65
- if (sign) {
66
- return make_float32(0x80000000);
67
- } else {
68
- return float32_zero;
69
- }
70
-}
71
-
72
#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
73
static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
74
{ \
75
@@ -XXX,XX +XXX,XX @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
76
}
77
78
GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
79
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
80
-
81
-static bool is_inf_prod(float64 a, float64 b)
82
-{
83
- return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
84
- (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
85
- (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
86
-}
87
-
88
-static float64 special_fma(float64 a, float64 b, float64 c,
89
- float_status *fp_status)
90
-{
91
- float64 ret = make_float64(0);
92
-
93
- /*
94
- * If A multiplied by B is an exact infinity and C is also an infinity
95
- * but with the opposite sign, FMA returns NaN and raises invalid.
96
- */
97
- uint8_t a_sign = float64_is_neg(a);
98
- uint8_t b_sign = float64_is_neg(b);
99
- uint8_t c_sign = float64_is_neg(c);
100
- if (is_inf_prod(a, b) && float64_is_infinity(c)) {
101
- if ((a_sign ^ b_sign) != c_sign) {
102
- ret = make_float64(DF_NAN);
103
- float_raise(float_flag_invalid, fp_status);
104
- return ret;
105
- }
106
- }
107
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
108
- (float64_is_zero(a) && float64_is_infinity(b))) {
109
- ret = make_float64(DF_NAN);
110
- float_raise(float_flag_invalid, fp_status);
111
- return ret;
112
- }
113
- /*
114
- * If none of the above checks are true and C is a NaN,
115
- * a NaN shall be returned
116
- * If A or B are NaN, a NAN shall be returned.
117
- */
118
- if (float64_is_any_nan(a) ||
119
- float64_is_any_nan(b) ||
120
- float64_is_any_nan(c)) {
121
- if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
122
- float_raise(float_flag_invalid, fp_status);
123
- }
124
- if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
125
- float_raise(float_flag_invalid, fp_status);
126
- }
127
- if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
128
- float_raise(float_flag_invalid, fp_status);
129
- }
130
- ret = make_float64(DF_NAN);
131
- return ret;
132
- }
133
- /*
134
- * We have checked for adding opposite-signed infinities.
135
- * Other infinities return infinity with the correct sign
136
- */
137
- if (float64_is_infinity(c)) {
138
- ret = infinite_float64(c_sign);
139
- return ret;
140
- }
141
- if (float64_is_infinity(a) || float64_is_infinity(b)) {
142
- ret = infinite_float64(a_sign ^ b_sign);
143
- return ret;
144
- }
145
- g_assert_not_reached();
146
-}
147
-
148
-static float32 special_fmaf(float32 a, float32 b, float32 c,
149
- float_status *fp_status)
150
-{
151
- float64 aa, bb, cc;
152
- aa = float32_to_float64(a, fp_status);
153
- bb = float32_to_float64(b, fp_status);
154
- cc = float32_to_float64(c, fp_status);
155
- return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
156
-}
157
-
158
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
159
- float_status *fp_status)
160
-{
161
- Accum prod;
162
- Accum acc;
163
- Accum result;
164
- accum_init(&prod);
165
- accum_init(&acc);
166
- accum_init(&result);
167
-
168
- uint8_t a_sign = float32_is_neg(a);
169
- uint8_t b_sign = float32_is_neg(b);
170
- uint8_t c_sign = float32_is_neg(c);
171
- if (float32_is_infinity(a) ||
172
- float32_is_infinity(b) ||
173
- float32_is_infinity(c)) {
174
- return special_fmaf(a, b, c, fp_status);
175
- }
176
- if (float32_is_any_nan(a) ||
177
- float32_is_any_nan(b) ||
178
- float32_is_any_nan(c)) {
179
- return special_fmaf(a, b, c, fp_status);
180
- }
181
- if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
182
- float32 tmp = float32_mul(a, b, fp_status);
183
- tmp = float32_add(tmp, c, fp_status);
184
- return tmp;
185
- }
186
-
187
- /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
188
- prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
189
-
190
- /*
191
- * Note: extracting the mantissa into an int is multiplying by
192
- * 2**23, so adjust here
193
- */
194
- prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
195
- prod.sign = a_sign ^ b_sign;
196
- if (float32_is_zero(a) || float32_is_zero(b)) {
197
- prod.exp = -2 * WAY_BIG_EXP;
198
- }
199
- if ((scale > 0) && float32_is_denormal(c)) {
200
- acc.mant = int128_mul_6464(0, 0);
201
- acc.exp = -WAY_BIG_EXP;
202
- acc.sign = c_sign;
203
- acc.sticky = 1;
204
- result = accum_add(prod, acc);
205
- } else if (!float32_is_zero(c)) {
206
- acc.mant = int128_mul_6464(float32_getmant(c), 1);
207
- acc.exp = float32_getexp(c);
208
- acc.sign = c_sign;
209
- result = accum_add(prod, acc);
210
- } else {
211
- result = prod;
212
- }
213
- result.exp += scale;
214
- return accum_round_float32(result, fp_status);
215
-}
216
217
float64 internal_mpyhh(float64 a, float64 b,
218
unsigned long long int accumulated,
30
--
219
--
31
2.25.1
220
2.43.0
32
33
1
A size is easier to work with than an end point,
1
This massive macro is now only used once.
2
particularly during initial buffer allocation.
2
Expand it for use only by float64.
3
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/region.c | 30 ++++++++++++++++++------------
7
target/hexagon/fma_emu.c | 255 +++++++++++++++++++--------------------
9
1 file changed, 18 insertions(+), 12 deletions(-)
8
1 file changed, 127 insertions(+), 128 deletions(-)
10
9
11
diff --git a/tcg/region.c b/tcg/region.c
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/region.c
12
--- a/target/hexagon/fma_emu.c
14
+++ b/tcg/region.c
13
+++ b/target/hexagon/fma_emu.c
15
@@ -XXX,XX +XXX,XX @@ struct tcg_region_state {
14
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
16
/* fields set at init time */
17
void *start;
18
void *start_aligned;
19
- void *end;
20
size_t n;
21
size_t size; /* size of one region */
22
size_t stride; /* .size + guard size */
23
+ size_t total_size; /* size of entire buffer, >= n * stride */
24
25
/* fields protected by the lock */
26
size_t current; /* current region index */
27
@@ -XXX,XX +XXX,XX @@ static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
28
if (curr_region == 0) {
29
start = region.start;
30
}
31
+ /* The final region may have a few extra pages due to earlier rounding. */
32
if (curr_region == region.n - 1) {
33
- end = region.end;
34
+ end = region.start_aligned + region.total_size;
35
}
36
37
*pstart = start;
38
@@ -XXX,XX +XXX,XX @@ static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
39
*/
40
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
41
{
42
- void *buf, *aligned;
43
- size_t size;
44
+ void *buf, *aligned, *end;
45
+ size_t total_size;
46
size_t page_size;
47
size_t region_size;
48
size_t n_regions;
49
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
50
assert(ok);
51
52
buf = tcg_init_ctx.code_gen_buffer;
53
- size = tcg_init_ctx.code_gen_buffer_size;
54
+ total_size = tcg_init_ctx.code_gen_buffer_size;
55
page_size = qemu_real_host_page_size;
56
n_regions = tcg_n_regions(max_cpus);
57
58
/* The first region will be 'aligned - buf' bytes larger than the others */
59
aligned = QEMU_ALIGN_PTR_UP(buf, page_size);
60
- g_assert(aligned < tcg_init_ctx.code_gen_buffer + size);
61
+ g_assert(aligned < tcg_init_ctx.code_gen_buffer + total_size);
62
+
63
/*
64
* Make region_size a multiple of page_size, using aligned as the start.
65
* As a result of this we might end up with a few extra pages at the end of
66
* the buffer; we will assign those to the last region.
67
*/
68
- region_size = (size - (aligned - buf)) / n_regions;
69
+ region_size = (total_size - (aligned - buf)) / n_regions;
70
region_size = QEMU_ALIGN_DOWN(region_size, page_size);
71
72
/* A region must have at least 2 pages; one code, one guard */
73
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
74
region.start = buf;
75
region.start_aligned = aligned;
76
/* page-align the end, since its last page will be a guard page */
77
- region.end = QEMU_ALIGN_PTR_DOWN(buf + size, page_size);
78
+ end = QEMU_ALIGN_PTR_DOWN(buf + total_size, page_size);
79
/* account for that last guard page */
80
- region.end -= page_size;
81
+ end -= page_size;
82
+ total_size = end - aligned;
83
+ region.total_size = total_size;
84
85
/*
86
* Set guard pages in the rw buffer, as that's the one into which
87
@@ -XXX,XX +XXX,XX @@ void tcg_region_prologue_set(TCGContext *s)
88
89
/* Register the balance of the buffer with gdb. */
90
tcg_register_jit(tcg_splitwx_to_rx(region.start),
91
- region.end - region.start);
92
+ region.start_aligned + region.total_size - region.start);
93
}
15
}
94
16
95
/*
17
/* Return a maximum finite value with the requested sign */
96
@@ -XXX,XX +XXX,XX @@ size_t tcg_code_capacity(void)
18
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
97
19
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
98
/* no need for synchronization; these variables are set at init time */
20
-{ \
99
guard_size = region.stride - region.size;
21
- if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
100
- capacity = region.end + guard_size - region.start;
22
- && ((a.guard | a.round | a.sticky) == 0)) { \
101
- capacity -= region.n * (guard_size + TCG_HIGHWATER);
23
- /* result zero */ \
102
+ capacity = region.total_size;
24
- switch (fp_status->float_rounding_mode) { \
103
+ capacity -= (region.n - 1) * guard_size;
25
- case float_round_down: \
104
+ capacity -= region.n * TCG_HIGHWATER;
26
- return zero_##SUFFIX(1); \
105
+
27
- default: \
106
return capacity;
28
- return zero_##SUFFIX(0); \
29
- } \
30
- } \
31
- /* Normalize right */ \
32
- /* We want MANTBITS bits of mantissa plus the leading one. */ \
33
- /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
34
- /* So we need to normalize right while the high word is non-zero and \
35
- * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
36
- while ((int128_gethi(a.mant) != 0) || \
37
- ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
38
- a = accum_norm_right(a, 1); \
39
- } \
40
- /* \
41
- * OK, now normalize left \
42
- * We want to normalize left until we have a leading one in bit 24 \
43
- * Theoretically, we only need to shift a maximum of one to the left if we \
44
- * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
45
- * should be 0 \
46
- */ \
47
- while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
48
- a = accum_norm_left(a); \
49
- } \
50
- /* \
51
- * OK, now we might need to denormalize because of potential underflow. \
52
- * We need to do this before rounding, and rounding might make us normal \
53
- * again \
54
- */ \
55
- while (a.exp <= 0) { \
56
- a = accum_norm_right(a, 1 - a.exp); \
57
- /* \
58
- * Do we have underflow? \
59
- * That's when we get an inexact answer because we ran out of bits \
60
- * in a denormal. \
61
- */ \
62
- if (a.guard || a.round || a.sticky) { \
63
- float_raise(float_flag_underflow, fp_status); \
64
- } \
65
- } \
66
- /* OK, we're relatively canonical... now we need to round */ \
67
- if (a.guard || a.round || a.sticky) { \
68
- float_raise(float_flag_inexact, fp_status); \
69
- switch (fp_status->float_rounding_mode) { \
70
- case float_round_to_zero: \
71
- /* Chop and we're done */ \
72
- break; \
73
- case float_round_up: \
74
- if (a.sign == 0) { \
75
- a.mant = int128_add(a.mant, int128_one()); \
76
- } \
77
- break; \
78
- case float_round_down: \
79
- if (a.sign != 0) { \
80
- a.mant = int128_add(a.mant, int128_one()); \
81
- } \
82
- break; \
83
- default: \
84
- if (a.round || a.sticky) { \
85
- /* round up if guard is 1, down if guard is zero */ \
86
- a.mant = int128_add(a.mant, int128_make64(a.guard)); \
87
- } else if (a.guard) { \
88
- /* exactly .5, round up if odd */ \
89
- a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
90
- } \
91
- break; \
92
- } \
93
- } \
94
- /* \
95
- * OK, now we might have carried all the way up. \
96
- * So we might need to shr once \
97
- * at least we know that the lsb should be zero if we rounded and \
98
- * got a carry out... \
99
- */ \
100
- if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
101
- a = accum_norm_right(a, 1); \
102
- } \
103
- /* Overflow? */ \
104
- if (a.exp >= INF_EXP) { \
105
- /* Yep, inf result */ \
106
- float_raise(float_flag_overflow, fp_status); \
107
- float_raise(float_flag_inexact, fp_status); \
108
- switch (fp_status->float_rounding_mode) { \
109
- case float_round_to_zero: \
110
- return maxfinite_##SUFFIX(a.sign); \
111
- case float_round_up: \
112
- if (a.sign == 0) { \
113
- return infinite_##SUFFIX(a.sign); \
114
- } else { \
115
- return maxfinite_##SUFFIX(a.sign); \
116
- } \
117
- case float_round_down: \
118
- if (a.sign != 0) { \
119
- return infinite_##SUFFIX(a.sign); \
120
- } else { \
121
- return maxfinite_##SUFFIX(a.sign); \
122
- } \
123
- default: \
124
- return infinite_##SUFFIX(a.sign); \
125
- } \
126
- } \
127
- /* Underflow? */ \
128
- if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
129
- /* Leading one means: No, we're normal. So, we should be done... */ \
130
- INTERNAL_TYPE ret; \
131
- ret.i = 0; \
132
- ret.sign = a.sign; \
133
- ret.exp = a.exp; \
134
- ret.mant = int128_getlo(a.mant); \
135
- return ret.i; \
136
- } \
137
- assert(a.exp == 1); \
138
- INTERNAL_TYPE ret; \
139
- ret.i = 0; \
140
- ret.sign = a.sign; \
141
- ret.exp = 0; \
142
- ret.mant = int128_getlo(a.mant); \
143
- return ret.i; \
144
+static float64 accum_round_float64(Accum a, float_status *fp_status)
145
+{
146
+ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
147
+ && ((a.guard | a.round | a.sticky) == 0)) {
148
+ /* result zero */
149
+ switch (fp_status->float_rounding_mode) {
150
+ case float_round_down:
151
+ return zero_float64(1);
152
+ default:
153
+ return zero_float64(0);
154
+ }
155
+ }
156
+ /*
157
+ * Normalize right
158
+ * We want DF_MANTBITS bits of mantissa plus the leading one.
159
+ * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
160
+ * So we need to normalize right while the high word is non-zero and
161
+ * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
162
+ */
163
+ while ((int128_gethi(a.mant) != 0) ||
164
+ ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
165
+ a = accum_norm_right(a, 1);
166
+ }
167
+ /*
168
+ * OK, now normalize left
169
+ * We want to normalize left until we have a leading one in bit 24
170
+ * Theoretically, we only need to shift a maximum of one to the left if we
171
+ * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
172
+ * should be 0
173
+ */
174
+ while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
175
+ a = accum_norm_left(a);
176
+ }
177
+ /*
178
+ * OK, now we might need to denormalize because of potential underflow.
179
+ * We need to do this before rounding, and rounding might make us normal
180
+ * again
181
+ */
182
+ while (a.exp <= 0) {
183
+ a = accum_norm_right(a, 1 - a.exp);
184
+ /*
185
+ * Do we have underflow?
186
+ * That's when we get an inexact answer because we ran out of bits
187
+ * in a denormal.
188
+ */
189
+ if (a.guard || a.round || a.sticky) {
190
+ float_raise(float_flag_underflow, fp_status);
191
+ }
192
+ }
193
+ /* OK, we're relatively canonical... now we need to round */
194
+ if (a.guard || a.round || a.sticky) {
195
+ float_raise(float_flag_inexact, fp_status);
196
+ switch (fp_status->float_rounding_mode) {
197
+ case float_round_to_zero:
198
+ /* Chop and we're done */
199
+ break;
200
+ case float_round_up:
201
+ if (a.sign == 0) {
202
+ a.mant = int128_add(a.mant, int128_one());
203
+ }
204
+ break;
205
+ case float_round_down:
206
+ if (a.sign != 0) {
207
+ a.mant = int128_add(a.mant, int128_one());
208
+ }
209
+ break;
210
+ default:
211
+ if (a.round || a.sticky) {
212
+ /* round up if guard is 1, down if guard is zero */
213
+ a.mant = int128_add(a.mant, int128_make64(a.guard));
214
+ } else if (a.guard) {
215
+ /* exactly .5, round up if odd */
216
+ a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
217
+ }
218
+ break;
219
+ }
220
+ }
221
+ /*
222
+ * OK, now we might have carried all the way up.
223
+ * So we might need to shr once
224
+ * at least we know that the lsb should be zero if we rounded and
225
+ * got a carry out...
226
+ */
227
+ if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
228
+ a = accum_norm_right(a, 1);
229
+ }
230
+ /* Overflow? */
231
+ if (a.exp >= DF_INF_EXP) {
232
+ /* Yep, inf result */
233
+ float_raise(float_flag_overflow, fp_status);
234
+ float_raise(float_flag_inexact, fp_status);
235
+ switch (fp_status->float_rounding_mode) {
236
+ case float_round_to_zero:
237
+ return maxfinite_float64(a.sign);
238
+ case float_round_up:
239
+ if (a.sign == 0) {
240
+ return infinite_float64(a.sign);
241
+ } else {
242
+ return maxfinite_float64(a.sign);
243
+ }
244
+ case float_round_down:
245
+ if (a.sign != 0) {
246
+ return infinite_float64(a.sign);
247
+ } else {
248
+ return maxfinite_float64(a.sign);
249
+ }
250
+ default:
251
+ return infinite_float64(a.sign);
252
+ }
253
+ }
254
+ /* Underflow? */
255
+ if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
256
+ /* Leading one means: No, we're normal. So, we should be done... */
257
+ Double ret;
258
+ ret.i = 0;
259
+ ret.sign = a.sign;
260
+ ret.exp = a.exp;
261
+ ret.mant = int128_getlo(a.mant);
262
+ return ret.i;
263
+ }
264
+ assert(a.exp == 1);
265
+ Double ret;
266
+ ret.i = 0;
267
+ ret.sign = a.sign;
268
+ ret.exp = 0;
269
+ ret.mant = int128_getlo(a.mant);
270
+ return ret.i;
107
}
271
}
108
272
273
-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
274
-
275
float64 internal_mpyhh(float64 a, float64 b,
276
unsigned long long int accumulated,
277
float_status *fp_status)
109
--
278
--
110
2.25.1
279
2.43.0
111
112
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
1
This structure, with bitfields, is incorrect for big-endian.
2
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
2
Use the existing float32_getexp_raw, which uses extract32.
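For reference, a short sketch (not part of the patch) of why the bit-position
accessors are preferable: extract32 works on the value's bit pattern, so it is
independent of host byte order and of how the compiler lays out bitfields.

    /* IEEE binary32 fields by bit position, host-endian independent: */
    uint32_t sign = extract32(f32, 31, 1);
    uint32_t exp  = extract32(f32, 23, 8);
    uint32_t mant = extract32(f32, 0, 23);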
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
meson.build | 4 +---
7
target/hexagon/fma_emu.c | 16 +++-------------
7
fpu/meson.build | 1 +
8
1 file changed, 3 insertions(+), 13 deletions(-)
8
2 files changed, 2 insertions(+), 3 deletions(-)
9
create mode 100644 fpu/meson.build
10
9
11
diff --git a/meson.build b/meson.build
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/meson.build
12
--- a/target/hexagon/fma_emu.c
14
+++ b/meson.build
13
+++ b/target/hexagon/fma_emu.c
15
@@ -XXX,XX +XXX,XX @@ subdir('softmmu')
14
@@ -XXX,XX +XXX,XX @@ typedef union {
16
15
};
17
common_ss.add(capstone)
16
} Double;
18
specific_ss.add(files('cpu.c', 'disas.c', 'gdbstub.c'), capstone)
17
19
-specific_ss.add(when: 'CONFIG_TCG', if_true: files(
18
-typedef union {
20
- 'fpu/softfloat.c',
19
- float f;
21
-))
20
- uint32_t i;
22
21
- struct {
23
# Work around a gcc bug/misfeature wherein constant propagation looks
22
- uint32_t mant:23;
24
# through an alias:
23
- uint32_t exp:8;
25
@@ -XXX,XX +XXX,XX @@ subdir('replay')
24
- uint32_t sign:1;
26
subdir('semihosting')
25
- };
27
subdir('hw')
26
-} Float;
28
subdir('tcg')
27
-
29
+subdir('fpu')
28
static uint64_t float64_getmant(float64 f64)
30
subdir('accel')
29
{
31
subdir('plugins')
30
Double a = { .i = f64 };
32
subdir('bsd-user')
31
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
33
diff --git a/fpu/meson.build b/fpu/meson.build
32
34
new file mode 100644
33
int32_t float32_getexp(float32 f32)
35
index XXXXXXX..XXXXXXX
34
{
36
--- /dev/null
35
- Float a = { .i = f32 };
37
+++ b/fpu/meson.build
36
+ int exp = float32_getexp_raw(f32);
38
@@ -0,0 +1 @@
37
if (float32_is_normal(f32)) {
39
+specific_ss.add(when: 'CONFIG_TCG', if_true: files('softfloat.c'))
38
- return a.exp;
39
+ return exp;
40
}
41
if (float32_is_denormal(f32)) {
42
- return a.exp + 1;
43
+ return exp + 1;
44
}
45
return -1;
46
}
40
--
47
--
41
2.25.1
48
2.43.0
42
43
1
Remove the ifdef ladder and move each define into the
1
This structure, with bitfields, is incorrect for big-endian.
2
appropriate header file.
2
Use extract64 and deposit64 instead.
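As a small illustration (not part of the patch) of the replacement pattern for
the 64-bit case: fields are read with extract64 and packed back with deposit64,
again independent of host byte order.

    /* Unpack an IEEE binary64 bit pattern ... */
    uint64_t mant = extract64(f64, 0, 52);
    uint64_t exp  = extract64(f64, 52, 11);
    uint64_t sign = extract64(f64, 63, 1);

    /* ... and repack it; the round trip reproduces f64 exactly. */
    uint64_t repacked = 0;
    repacked = deposit64(repacked, 0, 52, mant);
    repacked = deposit64(repacked, 52, 11, exp);
    repacked = deposit64(repacked, 63, 1, sign);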
3
3
4
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/aarch64/tcg-target.h | 1 +
7
target/hexagon/fma_emu.c | 46 ++++++++++++++--------------------------
9
tcg/arm/tcg-target.h | 1 +
8
1 file changed, 16 insertions(+), 30 deletions(-)
10
tcg/i386/tcg-target.h | 2 ++
11
tcg/mips/tcg-target.h | 6 ++++++
12
tcg/ppc/tcg-target.h | 2 ++
13
tcg/riscv/tcg-target.h | 1 +
14
tcg/s390/tcg-target.h | 3 +++
15
tcg/sparc/tcg-target.h | 1 +
16
tcg/tci/tcg-target.h | 1 +
17
tcg/region.c | 33 +++++----------------------------
18
10 files changed, 23 insertions(+), 28 deletions(-)
19
9
20
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
21
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/aarch64/tcg-target.h
12
--- a/target/hexagon/fma_emu.c
23
+++ b/tcg/aarch64/tcg-target.h
13
+++ b/target/hexagon/fma_emu.c
24
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@
25
15
26
#define TCG_TARGET_INSN_UNIT_SIZE 4
16
#define WAY_BIG_EXP 4096
27
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 24
17
28
+#define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
18
-typedef union {
29
#undef TCG_TARGET_STACK_GROWSUP
19
- double f;
30
20
- uint64_t i;
31
typedef enum {
21
- struct {
32
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
22
- uint64_t mant:52;
33
index XXXXXXX..XXXXXXX 100644
23
- uint64_t exp:11;
34
--- a/tcg/arm/tcg-target.h
24
- uint64_t sign:1;
35
+++ b/tcg/arm/tcg-target.h
25
- };
36
@@ -XXX,XX +XXX,XX @@ extern int arm_arch;
26
-} Double;
37
#undef TCG_TARGET_STACK_GROWSUP
27
-
38
#define TCG_TARGET_INSN_UNIT_SIZE 4
28
static uint64_t float64_getmant(float64 f64)
39
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 16
29
{
40
+#define MAX_CODE_GEN_BUFFER_SIZE UINT32_MAX
30
- Double a = { .i = f64 };
41
31
+ uint64_t mant = extract64(f64, 0, 52);
42
typedef enum {
32
if (float64_is_normal(f64)) {
43
TCG_REG_R0 = 0,
33
- return a.mant | 1ULL << 52;
44
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
34
+ return mant | 1ULL << 52;
45
index XXXXXXX..XXXXXXX 100644
35
}
46
--- a/tcg/i386/tcg-target.h
36
if (float64_is_zero(f64)) {
47
+++ b/tcg/i386/tcg-target.h
37
return 0;
48
@@ -XXX,XX +XXX,XX @@
38
}
49
#ifdef __x86_64__
39
if (float64_is_denormal(f64)) {
50
# define TCG_TARGET_REG_BITS 64
40
- return a.mant;
51
# define TCG_TARGET_NB_REGS 32
41
+ return mant;
52
+# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
42
}
53
#else
43
return ~0ULL;
54
# define TCG_TARGET_REG_BITS 32
44
}
55
# define TCG_TARGET_NB_REGS 24
45
56
+# define MAX_CODE_GEN_BUFFER_SIZE UINT32_MAX
46
int32_t float64_getexp(float64 f64)
57
#endif
47
{
58
48
- Double a = { .i = f64 };
59
typedef enum {
49
+ int exp = extract64(f64, 52, 11);
60
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
50
if (float64_is_normal(f64)) {
61
index XXXXXXX..XXXXXXX 100644
51
- return a.exp;
62
--- a/tcg/mips/tcg-target.h
52
+ return exp;
63
+++ b/tcg/mips/tcg-target.h
53
}
64
@@ -XXX,XX +XXX,XX @@
54
if (float64_is_denormal(f64)) {
65
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 16
55
- return a.exp + 1;
66
#define TCG_TARGET_NB_REGS 32
56
+ return exp + 1;
67
57
}
68
+/*
58
return -1;
69
+ * We have a 256MB branch region, but leave room to make sure the
59
}
70
+ * main executable is also within that region.
60
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
71
+ */
61
/* Return a maximum finite value with the requested sign */
72
+#define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB)
62
static float64 accum_round_float64(Accum a, float_status *fp_status)
63
{
64
+ uint64_t ret;
73
+
65
+
74
typedef enum {
66
if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
75
TCG_REG_ZERO = 0,
67
&& ((a.guard | a.round | a.sticky) == 0)) {
76
TCG_REG_AT,
68
/* result zero */
77
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
69
@@ -XXX,XX +XXX,XX @@ static float64 accum_round_float64(Accum a, float_status *fp_status)
78
index XXXXXXX..XXXXXXX 100644
70
}
79
--- a/tcg/ppc/tcg-target.h
71
}
80
+++ b/tcg/ppc/tcg-target.h
72
/* Underflow? */
81
@@ -XXX,XX +XXX,XX @@
73
- if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
82
74
+ ret = int128_getlo(a.mant);
83
#ifdef _ARCH_PPC64
75
+ if (ret & (1ULL << DF_MANTBITS)) {
84
# define TCG_TARGET_REG_BITS 64
76
/* Leading one means: No, we're normal. So, we should be done... */
85
+# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
77
- Double ret;
86
#else
78
- ret.i = 0;
87
# define TCG_TARGET_REG_BITS 32
79
- ret.sign = a.sign;
88
+# define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB)
80
- ret.exp = a.exp;
89
#endif
81
- ret.mant = int128_getlo(a.mant);
90
82
- return ret.i;
91
#define TCG_TARGET_NB_REGS 64
83
+ ret = deposit64(ret, 52, 11, a.exp);
92
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
84
+ } else {
93
index XXXXXXX..XXXXXXX 100644
85
+ assert(a.exp == 1);
94
--- a/tcg/riscv/tcg-target.h
86
+ ret = deposit64(ret, 52, 11, 0);
95
+++ b/tcg/riscv/tcg-target.h
87
}
96
@@ -XXX,XX +XXX,XX @@
88
- assert(a.exp == 1);
97
#define TCG_TARGET_INSN_UNIT_SIZE 4
89
- Double ret;
98
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 20
90
- ret.i = 0;
99
#define TCG_TARGET_NB_REGS 32
91
- ret.sign = a.sign;
100
+#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
92
- ret.exp = 0;
101
93
- ret.mant = int128_getlo(a.mant);
102
typedef enum {
94
- return ret.i;
103
TCG_REG_ZERO,
95
+ ret = deposit64(ret, 63, 1, a.sign);
104
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
96
+ return ret;
105
index XXXXXXX..XXXXXXX 100644
97
}
106
--- a/tcg/s390/tcg-target.h
98
107
+++ b/tcg/s390/tcg-target.h
99
float64 internal_mpyhh(float64 a, float64 b,
108
@@ -XXX,XX +XXX,XX @@
109
#define TCG_TARGET_INSN_UNIT_SIZE 2
110
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 19
111
112
+/* We have a +- 4GB range on the branches; leave some slop. */
113
+#define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB)
114
+
115
typedef enum TCGReg {
116
TCG_REG_R0 = 0,
117
TCG_REG_R1,
118
diff --git a/tcg/sparc/tcg-target.h b/tcg/sparc/tcg-target.h
119
index XXXXXXX..XXXXXXX 100644
120
--- a/tcg/sparc/tcg-target.h
121
+++ b/tcg/sparc/tcg-target.h
122
@@ -XXX,XX +XXX,XX @@
123
#define TCG_TARGET_INSN_UNIT_SIZE 4
124
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 32
125
#define TCG_TARGET_NB_REGS 32
126
+#define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
127
128
typedef enum {
129
TCG_REG_G0 = 0,
130
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
131
index XXXXXXX..XXXXXXX 100644
132
--- a/tcg/tci/tcg-target.h
133
+++ b/tcg/tci/tcg-target.h
134
@@ -XXX,XX +XXX,XX @@
135
#define TCG_TARGET_INTERPRETER 1
136
#define TCG_TARGET_INSN_UNIT_SIZE 1
137
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 32
138
+#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
139
140
#if UINTPTR_MAX == UINT32_MAX
141
# define TCG_TARGET_REG_BITS 32
142
diff --git a/tcg/region.c b/tcg/region.c
143
index XXXXXXX..XXXXXXX 100644
144
--- a/tcg/region.c
145
+++ b/tcg/region.c
146
@@ -XXX,XX +XXX,XX @@ static size_t tcg_n_regions(unsigned max_cpus)
147
/*
148
* Minimum size of the code gen buffer. This number is randomly chosen,
149
* but not so small that we can't have a fair number of TB's live.
150
+ *
151
+ * Maximum size, MAX_CODE_GEN_BUFFER_SIZE, is defined in tcg-target.h.
152
+ * Unless otherwise indicated, this is constrained by the range of
153
+ * direct branches on the host cpu, as used by the TCG implementation
154
+ * of goto_tb.
155
*/
156
#define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB)
157
158
-/*
159
- * Maximum size of the code gen buffer we'd like to use. Unless otherwise
160
- * indicated, this is constrained by the range of direct branches on the
161
- * host cpu, as used by the TCG implementation of goto_tb.
162
- */
163
-#if defined(__x86_64__)
164
-# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
165
-#elif defined(__sparc__)
166
-# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
167
-#elif defined(__powerpc64__)
168
-# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
169
-#elif defined(__powerpc__)
170
-# define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB)
171
-#elif defined(__aarch64__)
172
-# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
173
-#elif defined(__s390x__)
174
- /* We have a +- 4GB range on the branches; leave some slop. */
175
-# define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB)
176
-#elif defined(__mips__)
177
- /*
178
- * We have a 256MB branch region, but leave room to make sure the
179
- * main executable is also within that region.
180
- */
181
-# define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB)
182
-#else
183
-# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
184
-#endif
185
-
186
#if TCG_TARGET_REG_BITS == 32
187
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
188
#ifdef CONFIG_USER_ONLY
189
--
100
--
190
2.25.1
101
2.43.0
191
192
diff view generated by jsdifflib
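On the big-endian problem named in the commit message above: the layout of C bitfields follows the host ABI, so the mant/exp/sign struct only lines up with the IEEE-754 binary64 encoding on little-endian hosts, whereas shift-and-mask extraction of the integer image is host-independent. The sketch below is illustrative only; the demo_* helpers are local stand-ins modelled on QEMU's extract64()/deposit64(), not the real functions.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint64_t demo_extract64(uint64_t value, int start, int length)
    {
        return (value >> start) & (~0ULL >> (64 - length));
    }

    static uint64_t demo_deposit64(uint64_t value, int start, int length,
                                   uint64_t fieldval)
    {
        uint64_t mask = (~0ULL >> (64 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    int main(void)
    {
        double d = -1.5;
        uint64_t bits, rebuilt = 0;

        memcpy(&bits, &d, sizeof(bits));
        printf("mant=0x%013llx exp=%llu sign=%llu\n",
               (unsigned long long)demo_extract64(bits, 0, 52),
               (unsigned long long)demo_extract64(bits, 52, 11),
               (unsigned long long)demo_extract64(bits, 63, 1));

        /* Rebuild the same encoding field by field. */
        rebuilt = demo_deposit64(rebuilt, 0, 52, demo_extract64(bits, 0, 52));
        rebuilt = demo_deposit64(rebuilt, 52, 11, demo_extract64(bits, 52, 11));
        rebuilt = demo_deposit64(rebuilt, 63, 1, demo_extract64(bits, 63, 1));
        printf("round-trip %s\n", rebuilt == bits ? "ok" : "BROKEN");
        return 0;
    }

The same shifts and widths (0/52, 52/11, 63/1) are what the patched float64_getmant(), float64_getexp() and accum_round_float64() use, independent of host byte order.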
1
Buffer management is integral to tcg. Do not leave the allocation
1
No need to open-code 64x64->128-bit multiplication.
2
to code outside of tcg/. This is code movement, with further
3
cleanups to follow.
4
2
5
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
5
---
9
include/tcg/tcg.h | 2 +-
6
target/hexagon/fma_emu.c | 32 +++-----------------------------
10
accel/tcg/translate-all.c | 414 +-----------------------------------
7
1 file changed, 3 insertions(+), 29 deletions(-)
11
tcg/region.c | 431 +++++++++++++++++++++++++++++++++++++-
12
3 files changed, 428 insertions(+), 419 deletions(-)
13
8
14
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
9
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
15
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
16
--- a/include/tcg/tcg.h
11
--- a/target/hexagon/fma_emu.c
17
+++ b/include/tcg/tcg.h
12
+++ b/target/hexagon/fma_emu.c
18
@@ -XXX,XX +XXX,XX @@ void *tcg_malloc_internal(TCGContext *s, int size);
13
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32)
19
void tcg_pool_reset(TCGContext *s);
14
return -1;
20
TranslationBlock *tcg_tb_alloc(TCGContext *s);
21
22
-void tcg_region_init(void);
23
+void tcg_region_init(size_t tb_size, int splitwx);
24
void tb_destroy(TranslationBlock *tb);
25
void tcg_region_reset_all(void);
26
27
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
28
index XXXXXXX..XXXXXXX 100644
29
--- a/accel/tcg/translate-all.c
30
+++ b/accel/tcg/translate-all.c
31
@@ -XXX,XX +XXX,XX @@
32
*/
33
34
#include "qemu/osdep.h"
35
-#include "qemu/units.h"
36
#include "qemu-common.h"
37
38
#define NO_CPU_IO_DEFS
39
@@ -XXX,XX +XXX,XX @@
40
#include "exec/cputlb.h"
41
#include "exec/translate-all.h"
42
#include "qemu/bitmap.h"
43
-#include "qemu/error-report.h"
44
#include "qemu/qemu-print.h"
45
#include "qemu/timer.h"
46
#include "qemu/main-loop.h"
47
@@ -XXX,XX +XXX,XX @@ static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
48
}
49
}
15
}
50
16
51
-/* Minimum size of the code gen buffer. This number is randomly chosen,
17
-static uint32_t int128_getw0(Int128 x)
52
- but not so small that we can't have a fair number of TB's live. */
53
-#define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB)
54
-
55
-/* Maximum size of the code gen buffer we'd like to use. Unless otherwise
56
- indicated, this is constrained by the range of direct branches on the
57
- host cpu, as used by the TCG implementation of goto_tb. */
58
-#if defined(__x86_64__)
59
-# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
60
-#elif defined(__sparc__)
61
-# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
62
-#elif defined(__powerpc64__)
63
-# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
64
-#elif defined(__powerpc__)
65
-# define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB)
66
-#elif defined(__aarch64__)
67
-# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
68
-#elif defined(__s390x__)
69
- /* We have a +- 4GB range on the branches; leave some slop. */
70
-# define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB)
71
-#elif defined(__mips__)
72
- /* We have a 256MB branch region, but leave room to make sure the
73
- main executable is also within that region. */
74
-# define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB)
75
-#else
76
-# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
77
-#endif
78
-
79
-#if TCG_TARGET_REG_BITS == 32
80
-#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
81
-#ifdef CONFIG_USER_ONLY
82
-/*
83
- * For user mode on smaller 32 bit systems we may run into trouble
84
- * allocating big chunks of data in the right place. On these systems
85
- * we utilise a static code generation buffer directly in the binary.
86
- */
87
-#define USE_STATIC_CODE_GEN_BUFFER
88
-#endif
89
-#else /* TCG_TARGET_REG_BITS == 64 */
90
-#ifdef CONFIG_USER_ONLY
91
-/*
92
- * As user-mode emulation typically means running multiple instances
93
- * of the translator don't go too nuts with our default code gen
94
- * buffer lest we make things too hard for the OS.
95
- */
96
-#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
97
-#else
98
-/*
99
- * We expect most system emulation to run one or two guests per host.
100
- * Users running large scale system emulation may want to tweak their
101
- * runtime setup via the tb-size control on the command line.
102
- */
103
-#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
104
-#endif
105
-#endif
106
-
107
-#define DEFAULT_CODE_GEN_BUFFER_SIZE \
108
- (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
109
- ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
110
-
111
-static size_t size_code_gen_buffer(size_t tb_size)
112
-{
18
-{
113
- /* Size the buffer. */
19
- return int128_getlo(x);
114
- if (tb_size == 0) {
115
- size_t phys_mem = qemu_get_host_physmem();
116
- if (phys_mem == 0) {
117
- tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
118
- } else {
119
- tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8);
120
- }
121
- }
122
- if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
123
- tb_size = MIN_CODE_GEN_BUFFER_SIZE;
124
- }
125
- if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
126
- tb_size = MAX_CODE_GEN_BUFFER_SIZE;
127
- }
128
- return tb_size;
129
-}
20
-}
130
-
21
-
131
-#ifdef __mips__
22
-static uint32_t int128_getw1(Int128 x)
132
-/* In order to use J and JAL within the code_gen_buffer, we require
133
- that the buffer not cross a 256MB boundary. */
134
-static inline bool cross_256mb(void *addr, size_t size)
135
-{
23
-{
136
- return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
24
- return int128_getlo(x) >> 32;
137
-}
25
-}
138
-
26
-
139
-/* We weren't able to allocate a buffer without crossing that boundary,
27
static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
140
- so make do with the larger portion of the buffer that doesn't cross.
28
{
141
- Returns the new base of the buffer, and adjusts code_gen_buffer_size. */
29
- Int128 a, b;
142
-static inline void *split_cross_256mb(void *buf1, size_t size1)
30
- uint64_t pp0, pp1a, pp1b, pp1s, pp2;
143
-{
31
+ uint64_t l, h;
144
- void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
32
145
- size_t size2 = buf1 + size1 - buf2;
33
- a = int128_make64(ai);
34
- b = int128_make64(bi);
35
- pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
36
- pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
37
- pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
38
- pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
146
-
39
-
147
- size1 = buf2 - buf1;
40
- pp1s = pp1a + pp1b;
148
- if (size1 < size2) {
41
- if ((pp1s < pp1a) || (pp1s < pp1b)) {
149
- size1 = size2;
42
- pp2 += (1ULL << 32);
150
- buf1 = buf2;
43
- }
44
- uint64_t ret_low = pp0 + (pp1s << 32);
45
- if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
46
- pp2 += 1;
151
- }
47
- }
152
-
48
-
153
- tcg_ctx->code_gen_buffer_size = size1;
49
- return int128_make128(ret_low, pp2 + (pp1s >> 32));
154
- return buf1;
50
+ mulu64(&l, &h, ai, bi);
155
-}
51
+ return int128_make128(l, h);
156
-#endif
157
-
158
-#ifdef USE_STATIC_CODE_GEN_BUFFER
159
-static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
160
- __attribute__((aligned(CODE_GEN_ALIGN)));
161
-
162
-static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
163
-{
164
- void *buf, *end;
165
- size_t size;
166
-
167
- if (splitwx > 0) {
168
- error_setg(errp, "jit split-wx not supported");
169
- return false;
170
- }
171
-
172
- /* page-align the beginning and end of the buffer */
173
- buf = static_code_gen_buffer;
174
- end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
175
- buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
176
- end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
177
-
178
- size = end - buf;
179
-
180
- /* Honor a command-line option limiting the size of the buffer. */
181
- if (size > tb_size) {
182
- size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
183
- }
184
- tcg_ctx->code_gen_buffer_size = size;
185
-
186
-#ifdef __mips__
187
- if (cross_256mb(buf, size)) {
188
- buf = split_cross_256mb(buf, size);
189
- size = tcg_ctx->code_gen_buffer_size;
190
- }
191
-#endif
192
-
193
- if (qemu_mprotect_rwx(buf, size)) {
194
- error_setg_errno(errp, errno, "mprotect of jit buffer");
195
- return false;
196
- }
197
- qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
198
-
199
- tcg_ctx->code_gen_buffer = buf;
200
- return true;
201
-}
202
-#elif defined(_WIN32)
203
-static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
204
-{
205
- void *buf;
206
-
207
- if (splitwx > 0) {
208
- error_setg(errp, "jit split-wx not supported");
209
- return false;
210
- }
211
-
212
- buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
213
- PAGE_EXECUTE_READWRITE);
214
- if (buf == NULL) {
215
- error_setg_win32(errp, GetLastError(),
216
- "allocate %zu bytes for jit buffer", size);
217
- return false;
218
- }
219
-
220
- tcg_ctx->code_gen_buffer = buf;
221
- tcg_ctx->code_gen_buffer_size = size;
222
- return true;
223
-}
224
-#else
225
-static bool alloc_code_gen_buffer_anon(size_t size, int prot,
226
- int flags, Error **errp)
227
-{
228
- void *buf;
229
-
230
- buf = mmap(NULL, size, prot, flags, -1, 0);
231
- if (buf == MAP_FAILED) {
232
- error_setg_errno(errp, errno,
233
- "allocate %zu bytes for jit buffer", size);
234
- return false;
235
- }
236
- tcg_ctx->code_gen_buffer_size = size;
237
-
238
-#ifdef __mips__
239
- if (cross_256mb(buf, size)) {
240
- /*
241
- * Try again, with the original still mapped, to avoid re-acquiring
242
- * the same 256mb crossing.
243
- */
244
- size_t size2;
245
- void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
246
- switch ((int)(buf2 != MAP_FAILED)) {
247
- case 1:
248
- if (!cross_256mb(buf2, size)) {
249
- /* Success! Use the new buffer. */
250
- munmap(buf, size);
251
- break;
252
- }
253
- /* Failure. Work with what we had. */
254
- munmap(buf2, size);
255
- /* fallthru */
256
- default:
257
- /* Split the original buffer. Free the smaller half. */
258
- buf2 = split_cross_256mb(buf, size);
259
- size2 = tcg_ctx->code_gen_buffer_size;
260
- if (buf == buf2) {
261
- munmap(buf + size2, size - size2);
262
- } else {
263
- munmap(buf, size - size2);
264
- }
265
- size = size2;
266
- break;
267
- }
268
- buf = buf2;
269
- }
270
-#endif
271
-
272
- /* Request large pages for the buffer. */
273
- qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
274
-
275
- tcg_ctx->code_gen_buffer = buf;
276
- return true;
277
-}
278
-
279
-#ifndef CONFIG_TCG_INTERPRETER
280
-#ifdef CONFIG_POSIX
281
-#include "qemu/memfd.h"
282
-
283
-static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
284
-{
285
- void *buf_rw = NULL, *buf_rx = MAP_FAILED;
286
- int fd = -1;
287
-
288
-#ifdef __mips__
289
- /* Find space for the RX mapping, vs the 256MiB regions. */
290
- if (!alloc_code_gen_buffer_anon(size, PROT_NONE,
291
- MAP_PRIVATE | MAP_ANONYMOUS |
292
- MAP_NORESERVE, errp)) {
293
- return false;
294
- }
295
- /* The size of the mapping may have been adjusted. */
296
- size = tcg_ctx->code_gen_buffer_size;
297
- buf_rx = tcg_ctx->code_gen_buffer;
298
-#endif
299
-
300
- buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
301
- if (buf_rw == NULL) {
302
- goto fail;
303
- }
304
-
305
-#ifdef __mips__
306
- void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
307
- MAP_SHARED | MAP_FIXED, fd, 0);
308
- if (tmp != buf_rx) {
309
- goto fail_rx;
310
- }
311
-#else
312
- buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
313
- if (buf_rx == MAP_FAILED) {
314
- goto fail_rx;
315
- }
316
-#endif
317
-
318
- close(fd);
319
- tcg_ctx->code_gen_buffer = buf_rw;
320
- tcg_ctx->code_gen_buffer_size = size;
321
- tcg_splitwx_diff = buf_rx - buf_rw;
322
-
323
- /* Request large pages for the buffer and the splitwx. */
324
- qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
325
- qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
326
- return true;
327
-
328
- fail_rx:
329
- error_setg_errno(errp, errno, "failed to map shared memory for execute");
330
- fail:
331
- if (buf_rx != MAP_FAILED) {
332
- munmap(buf_rx, size);
333
- }
334
- if (buf_rw) {
335
- munmap(buf_rw, size);
336
- }
337
- if (fd >= 0) {
338
- close(fd);
339
- }
340
- return false;
341
-}
342
-#endif /* CONFIG_POSIX */
343
-
344
-#ifdef CONFIG_DARWIN
345
-#include <mach/mach.h>
346
-
347
-extern kern_return_t mach_vm_remap(vm_map_t target_task,
348
- mach_vm_address_t *target_address,
349
- mach_vm_size_t size,
350
- mach_vm_offset_t mask,
351
- int flags,
352
- vm_map_t src_task,
353
- mach_vm_address_t src_address,
354
- boolean_t copy,
355
- vm_prot_t *cur_protection,
356
- vm_prot_t *max_protection,
357
- vm_inherit_t inheritance);
358
-
359
-static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
360
-{
361
- kern_return_t ret;
362
- mach_vm_address_t buf_rw, buf_rx;
363
- vm_prot_t cur_prot, max_prot;
364
-
365
- /* Map the read-write portion via normal anon memory. */
366
- if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
367
- MAP_PRIVATE | MAP_ANONYMOUS, errp)) {
368
- return false;
369
- }
370
-
371
- buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer;
372
- buf_rx = 0;
373
- ret = mach_vm_remap(mach_task_self(),
374
- &buf_rx,
375
- size,
376
- 0,
377
- VM_FLAGS_ANYWHERE,
378
- mach_task_self(),
379
- buf_rw,
380
- false,
381
- &cur_prot,
382
- &max_prot,
383
- VM_INHERIT_NONE);
384
- if (ret != KERN_SUCCESS) {
385
- /* TODO: Convert "ret" to a human readable error message. */
386
- error_setg(errp, "vm_remap for jit splitwx failed");
387
- munmap((void *)buf_rw, size);
388
- return false;
389
- }
390
-
391
- if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
392
- error_setg_errno(errp, errno, "mprotect for jit splitwx");
393
- munmap((void *)buf_rx, size);
394
- munmap((void *)buf_rw, size);
395
- return false;
396
- }
397
-
398
- tcg_splitwx_diff = buf_rx - buf_rw;
399
- return true;
400
-}
401
-#endif /* CONFIG_DARWIN */
402
-#endif /* CONFIG_TCG_INTERPRETER */
403
-
404
-static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
405
-{
406
-#ifndef CONFIG_TCG_INTERPRETER
407
-# ifdef CONFIG_DARWIN
408
- return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
409
-# endif
410
-# ifdef CONFIG_POSIX
411
- return alloc_code_gen_buffer_splitwx_memfd(size, errp);
412
-# endif
413
-#endif
414
- error_setg(errp, "jit split-wx not supported");
415
- return false;
416
-}
417
-
418
-static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
419
-{
420
- ERRP_GUARD();
421
- int prot, flags;
422
-
423
- if (splitwx) {
424
- if (alloc_code_gen_buffer_splitwx(size, errp)) {
425
- return true;
426
- }
427
- /*
428
- * If splitwx force-on (1), fail;
429
- * if splitwx default-on (-1), fall through to splitwx off.
430
- */
431
- if (splitwx > 0) {
432
- return false;
433
- }
434
- error_free_or_abort(errp);
435
- }
436
-
437
- prot = PROT_READ | PROT_WRITE | PROT_EXEC;
438
- flags = MAP_PRIVATE | MAP_ANONYMOUS;
439
-#ifdef CONFIG_TCG_INTERPRETER
440
- /* The tcg interpreter does not need execute permission. */
441
- prot = PROT_READ | PROT_WRITE;
442
-#elif defined(CONFIG_DARWIN)
443
- /* Applicable to both iOS and macOS (Apple Silicon). */
444
- if (!splitwx) {
445
- flags |= MAP_JIT;
446
- }
447
-#endif
448
-
449
- return alloc_code_gen_buffer_anon(size, prot, flags, errp);
450
-}
451
-#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
452
-
453
static bool tb_cmp(const void *ap, const void *bp)
454
{
455
const TranslationBlock *a = ap;
456
@@ -XXX,XX +XXX,XX @@ static void tb_htable_init(void)
457
size. */
458
void tcg_exec_init(unsigned long tb_size, int splitwx)
459
{
460
- bool ok;
461
-
462
tcg_allowed = true;
463
tcg_context_init(&tcg_init_ctx);
464
page_init();
465
tb_htable_init();
466
-
467
- ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
468
- splitwx, &error_fatal);
469
- assert(ok);
470
-
471
- /* TODO: allocating regions is hand-in-glove with code_gen_buffer. */
472
- tcg_region_init();
473
+ tcg_region_init(tb_size, splitwx);
474
475
#if defined(CONFIG_SOFTMMU)
476
/* There's no guest base to take into account, so go ahead and
477
diff --git a/tcg/region.c b/tcg/region.c
478
index XXXXXXX..XXXXXXX 100644
479
--- a/tcg/region.c
480
+++ b/tcg/region.c
481
@@ -XXX,XX +XXX,XX @@
482
*/
483
484
#include "qemu/osdep.h"
485
+#include "qemu/units.h"
486
+#include "qapi/error.h"
487
#include "exec/exec-all.h"
488
#include "tcg/tcg.h"
489
#if !defined(CONFIG_USER_ONLY)
490
@@ -XXX,XX +XXX,XX @@ static size_t tcg_n_regions(void)
491
}
52
}
492
#endif
53
493
54
static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
494
+/*
495
+ * Minimum size of the code gen buffer. This number is randomly chosen,
496
+ * but not so small that we can't have a fair number of TB's live.
497
+ */
498
+#define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB)
499
+
500
+/*
501
+ * Maximum size of the code gen buffer we'd like to use. Unless otherwise
502
+ * indicated, this is constrained by the range of direct branches on the
503
+ * host cpu, as used by the TCG implementation of goto_tb.
504
+ */
505
+#if defined(__x86_64__)
506
+# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
507
+#elif defined(__sparc__)
508
+# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
509
+#elif defined(__powerpc64__)
510
+# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
511
+#elif defined(__powerpc__)
512
+# define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB)
513
+#elif defined(__aarch64__)
514
+# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
515
+#elif defined(__s390x__)
516
+ /* We have a +- 4GB range on the branches; leave some slop. */
517
+# define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB)
518
+#elif defined(__mips__)
519
+ /*
520
+ * We have a 256MB branch region, but leave room to make sure the
521
+ * main executable is also within that region.
522
+ */
523
+# define MAX_CODE_GEN_BUFFER_SIZE (128 * MiB)
524
+#else
525
+# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
526
+#endif
527
+
528
+#if TCG_TARGET_REG_BITS == 32
529
+#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
530
+#ifdef CONFIG_USER_ONLY
531
+/*
532
+ * For user mode on smaller 32 bit systems we may run into trouble
533
+ * allocating big chunks of data in the right place. On these systems
534
+ * we utilise a static code generation buffer directly in the binary.
535
+ */
536
+#define USE_STATIC_CODE_GEN_BUFFER
537
+#endif
538
+#else /* TCG_TARGET_REG_BITS == 64 */
539
+#ifdef CONFIG_USER_ONLY
540
+/*
541
+ * As user-mode emulation typically means running multiple instances
542
+ * of the translator don't go too nuts with our default code gen
543
+ * buffer lest we make things too hard for the OS.
544
+ */
545
+#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
546
+#else
547
+/*
548
+ * We expect most system emulation to run one or two guests per host.
549
+ * Users running large scale system emulation may want to tweak their
550
+ * runtime setup via the tb-size control on the command line.
551
+ */
552
+#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
553
+#endif
554
+#endif
555
+
556
+#define DEFAULT_CODE_GEN_BUFFER_SIZE \
557
+ (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
558
+ ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
559
+
560
+static size_t size_code_gen_buffer(size_t tb_size)
561
+{
562
+ /* Size the buffer. */
563
+ if (tb_size == 0) {
564
+ size_t phys_mem = qemu_get_host_physmem();
565
+ if (phys_mem == 0) {
566
+ tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
567
+ } else {
568
+ tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, phys_mem / 8);
569
+ }
570
+ }
571
+ if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
572
+ tb_size = MIN_CODE_GEN_BUFFER_SIZE;
573
+ }
574
+ if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
575
+ tb_size = MAX_CODE_GEN_BUFFER_SIZE;
576
+ }
577
+ return tb_size;
578
+}
579
+
580
+#ifdef __mips__
581
+/*
582
+ * In order to use J and JAL within the code_gen_buffer, we require
583
+ * that the buffer not cross a 256MB boundary.
584
+ */
585
+static inline bool cross_256mb(void *addr, size_t size)
586
+{
587
+ return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
588
+}
589
+
590
+/*
591
+ * We weren't able to allocate a buffer without crossing that boundary,
592
+ * so make do with the larger portion of the buffer that doesn't cross.
593
+ * Returns the new base of the buffer, and adjusts code_gen_buffer_size.
594
+ */
595
+static inline void *split_cross_256mb(void *buf1, size_t size1)
596
+{
597
+ void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
598
+ size_t size2 = buf1 + size1 - buf2;
599
+
600
+ size1 = buf2 - buf1;
601
+ if (size1 < size2) {
602
+ size1 = size2;
603
+ buf1 = buf2;
604
+ }
605
+
606
+ tcg_ctx->code_gen_buffer_size = size1;
607
+ return buf1;
608
+}
609
+#endif
610
+
611
+#ifdef USE_STATIC_CODE_GEN_BUFFER
612
+static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
613
+ __attribute__((aligned(CODE_GEN_ALIGN)));
614
+
615
+static bool alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
616
+{
617
+ void *buf, *end;
618
+ size_t size;
619
+
620
+ if (splitwx > 0) {
621
+ error_setg(errp, "jit split-wx not supported");
622
+ return false;
623
+ }
624
+
625
+ /* page-align the beginning and end of the buffer */
626
+ buf = static_code_gen_buffer;
627
+ end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
628
+ buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
629
+ end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
630
+
631
+ size = end - buf;
632
+
633
+ /* Honor a command-line option limiting the size of the buffer. */
634
+ if (size > tb_size) {
635
+ size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
636
+ }
637
+ tcg_ctx->code_gen_buffer_size = size;
638
+
639
+#ifdef __mips__
640
+ if (cross_256mb(buf, size)) {
641
+ buf = split_cross_256mb(buf, size);
642
+ size = tcg_ctx->code_gen_buffer_size;
643
+ }
644
+#endif
645
+
646
+ if (qemu_mprotect_rwx(buf, size)) {
647
+ error_setg_errno(errp, errno, "mprotect of jit buffer");
648
+ return false;
649
+ }
650
+ qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
651
+
652
+ tcg_ctx->code_gen_buffer = buf;
653
+ return true;
654
+}
655
+#elif defined(_WIN32)
656
+static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
657
+{
658
+ void *buf;
659
+
660
+ if (splitwx > 0) {
661
+ error_setg(errp, "jit split-wx not supported");
662
+ return false;
663
+ }
664
+
665
+ buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
666
+ PAGE_EXECUTE_READWRITE);
667
+ if (buf == NULL) {
668
+ error_setg_win32(errp, GetLastError(),
669
+ "allocate %zu bytes for jit buffer", size);
670
+ return false;
671
+ }
672
+
673
+ tcg_ctx->code_gen_buffer = buf;
674
+ tcg_ctx->code_gen_buffer_size = size;
675
+ return true;
676
+}
677
+#else
678
+static bool alloc_code_gen_buffer_anon(size_t size, int prot,
679
+ int flags, Error **errp)
680
+{
681
+ void *buf;
682
+
683
+ buf = mmap(NULL, size, prot, flags, -1, 0);
684
+ if (buf == MAP_FAILED) {
685
+ error_setg_errno(errp, errno,
686
+ "allocate %zu bytes for jit buffer", size);
687
+ return false;
688
+ }
689
+ tcg_ctx->code_gen_buffer_size = size;
690
+
691
+#ifdef __mips__
692
+ if (cross_256mb(buf, size)) {
693
+ /*
694
+ * Try again, with the original still mapped, to avoid re-acquiring
695
+ * the same 256mb crossing.
696
+ */
697
+ size_t size2;
698
+ void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
699
+ switch ((int)(buf2 != MAP_FAILED)) {
700
+ case 1:
701
+ if (!cross_256mb(buf2, size)) {
702
+ /* Success! Use the new buffer. */
703
+ munmap(buf, size);
704
+ break;
705
+ }
706
+ /* Failure. Work with what we had. */
707
+ munmap(buf2, size);
708
+ /* fallthru */
709
+ default:
710
+ /* Split the original buffer. Free the smaller half. */
711
+ buf2 = split_cross_256mb(buf, size);
712
+ size2 = tcg_ctx->code_gen_buffer_size;
713
+ if (buf == buf2) {
714
+ munmap(buf + size2, size - size2);
715
+ } else {
716
+ munmap(buf, size - size2);
717
+ }
718
+ size = size2;
719
+ break;
720
+ }
721
+ buf = buf2;
722
+ }
723
+#endif
724
+
725
+ /* Request large pages for the buffer. */
726
+ qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);
727
+
728
+ tcg_ctx->code_gen_buffer = buf;
729
+ return true;
730
+}
731
+
732
+#ifndef CONFIG_TCG_INTERPRETER
733
+#ifdef CONFIG_POSIX
734
+#include "qemu/memfd.h"
735
+
736
+static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
737
+{
738
+ void *buf_rw = NULL, *buf_rx = MAP_FAILED;
739
+ int fd = -1;
740
+
741
+#ifdef __mips__
742
+ /* Find space for the RX mapping, vs the 256MiB regions. */
743
+ if (!alloc_code_gen_buffer_anon(size, PROT_NONE,
744
+ MAP_PRIVATE | MAP_ANONYMOUS |
745
+ MAP_NORESERVE, errp)) {
746
+ return false;
747
+ }
748
+ /* The size of the mapping may have been adjusted. */
749
+ size = tcg_ctx->code_gen_buffer_size;
750
+ buf_rx = tcg_ctx->code_gen_buffer;
751
+#endif
752
+
753
+ buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
754
+ if (buf_rw == NULL) {
755
+ goto fail;
756
+ }
757
+
758
+#ifdef __mips__
759
+ void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
760
+ MAP_SHARED | MAP_FIXED, fd, 0);
761
+ if (tmp != buf_rx) {
762
+ goto fail_rx;
763
+ }
764
+#else
765
+ buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
766
+ if (buf_rx == MAP_FAILED) {
767
+ goto fail_rx;
768
+ }
769
+#endif
770
+
771
+ close(fd);
772
+ tcg_ctx->code_gen_buffer = buf_rw;
773
+ tcg_ctx->code_gen_buffer_size = size;
774
+ tcg_splitwx_diff = buf_rx - buf_rw;
775
+
776
+ /* Request large pages for the buffer and the splitwx. */
777
+ qemu_madvise(buf_rw, size, QEMU_MADV_HUGEPAGE);
778
+ qemu_madvise(buf_rx, size, QEMU_MADV_HUGEPAGE);
779
+ return true;
780
+
781
+ fail_rx:
782
+ error_setg_errno(errp, errno, "failed to map shared memory for execute");
783
+ fail:
784
+ if (buf_rx != MAP_FAILED) {
785
+ munmap(buf_rx, size);
786
+ }
787
+ if (buf_rw) {
788
+ munmap(buf_rw, size);
789
+ }
790
+ if (fd >= 0) {
791
+ close(fd);
792
+ }
793
+ return false;
794
+}
795
+#endif /* CONFIG_POSIX */
796
+
797
+#ifdef CONFIG_DARWIN
798
+#include <mach/mach.h>
799
+
800
+extern kern_return_t mach_vm_remap(vm_map_t target_task,
801
+ mach_vm_address_t *target_address,
802
+ mach_vm_size_t size,
803
+ mach_vm_offset_t mask,
804
+ int flags,
805
+ vm_map_t src_task,
806
+ mach_vm_address_t src_address,
807
+ boolean_t copy,
808
+ vm_prot_t *cur_protection,
809
+ vm_prot_t *max_protection,
810
+ vm_inherit_t inheritance);
811
+
812
+static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
813
+{
814
+ kern_return_t ret;
815
+ mach_vm_address_t buf_rw, buf_rx;
816
+ vm_prot_t cur_prot, max_prot;
817
+
818
+ /* Map the read-write portion via normal anon memory. */
819
+ if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
820
+ MAP_PRIVATE | MAP_ANONYMOUS, errp)) {
821
+ return false;
822
+ }
823
+
824
+ buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer;
825
+ buf_rx = 0;
826
+ ret = mach_vm_remap(mach_task_self(),
827
+ &buf_rx,
828
+ size,
829
+ 0,
830
+ VM_FLAGS_ANYWHERE,
831
+ mach_task_self(),
832
+ buf_rw,
833
+ false,
834
+ &cur_prot,
835
+ &max_prot,
836
+ VM_INHERIT_NONE);
837
+ if (ret != KERN_SUCCESS) {
838
+ /* TODO: Convert "ret" to a human readable error message. */
839
+ error_setg(errp, "vm_remap for jit splitwx failed");
840
+ munmap((void *)buf_rw, size);
841
+ return false;
842
+ }
843
+
844
+ if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
845
+ error_setg_errno(errp, errno, "mprotect for jit splitwx");
846
+ munmap((void *)buf_rx, size);
847
+ munmap((void *)buf_rw, size);
848
+ return false;
849
+ }
850
+
851
+ tcg_splitwx_diff = buf_rx - buf_rw;
852
+ return true;
853
+}
854
+#endif /* CONFIG_DARWIN */
855
+#endif /* CONFIG_TCG_INTERPRETER */
856
+
857
+static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
858
+{
859
+#ifndef CONFIG_TCG_INTERPRETER
860
+# ifdef CONFIG_DARWIN
861
+ return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
862
+# endif
863
+# ifdef CONFIG_POSIX
864
+ return alloc_code_gen_buffer_splitwx_memfd(size, errp);
865
+# endif
866
+#endif
867
+ error_setg(errp, "jit split-wx not supported");
868
+ return false;
869
+}
870
+
871
+static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
872
+{
873
+ ERRP_GUARD();
874
+ int prot, flags;
875
+
876
+ if (splitwx) {
877
+ if (alloc_code_gen_buffer_splitwx(size, errp)) {
878
+ return true;
879
+ }
880
+ /*
881
+ * If splitwx force-on (1), fail;
882
+ * if splitwx default-on (-1), fall through to splitwx off.
883
+ */
884
+ if (splitwx > 0) {
885
+ return false;
886
+ }
887
+ error_free_or_abort(errp);
888
+ }
889
+
890
+ prot = PROT_READ | PROT_WRITE | PROT_EXEC;
891
+ flags = MAP_PRIVATE | MAP_ANONYMOUS;
892
+#ifdef CONFIG_TCG_INTERPRETER
893
+ /* The tcg interpreter does not need execute permission. */
894
+ prot = PROT_READ | PROT_WRITE;
895
+#elif defined(CONFIG_DARWIN)
896
+ /* Applicable to both iOS and macOS (Apple Silicon). */
897
+ if (!splitwx) {
898
+ flags |= MAP_JIT;
899
+ }
900
+#endif
901
+
902
+ return alloc_code_gen_buffer_anon(size, prot, flags, errp);
903
+}
904
+#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
905
+
906
/*
907
* Initializes region partitioning.
908
*
909
@@ -XXX,XX +XXX,XX @@ static size_t tcg_n_regions(void)
910
* in practice. Multi-threaded guests share most if not all of their translated
911
* code, which makes parallel code generation less appealing than in softmmu.
912
*/
913
-void tcg_region_init(void)
914
+void tcg_region_init(size_t tb_size, int splitwx)
915
{
916
- void *buf = tcg_init_ctx.code_gen_buffer;
917
- void *aligned;
918
- size_t size = tcg_init_ctx.code_gen_buffer_size;
919
- size_t page_size = qemu_real_host_page_size;
920
+ void *buf, *aligned;
921
+ size_t size;
922
+ size_t page_size;
923
size_t region_size;
924
size_t n_regions;
925
size_t i;
926
+ bool ok;
927
928
+ ok = alloc_code_gen_buffer(size_code_gen_buffer(tb_size),
929
+ splitwx, &error_fatal);
930
+ assert(ok);
931
+
932
+ buf = tcg_init_ctx.code_gen_buffer;
933
+ size = tcg_init_ctx.code_gen_buffer_size;
934
+ page_size = qemu_real_host_page_size;
935
n_regions = tcg_n_regions();
936
937
/* The first region will be 'aligned - buf' bytes larger than the others */
938
--
55
--
939
2.25.1
56
2.43.0
940
941
diff view generated by jsdifflib
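On the "no need to open-code 64x64->128-bit multiplication" point above: a widening multiply done with 32-bit partial products (as in the removed int128_mul_6464) and one done with the host's wide multiply produce the same 128-bit result. A rough standalone comparison, assuming a GCC/Clang host that provides unsigned __int128; the demo function is illustrative, not the QEMU mulu64() helper.

    #include <stdint.h>
    #include <stdio.h>

    /* 64x64 -> 128 via 32-bit halves, as the removed hexagon code did. */
    static void mul64_by_halves(uint64_t a, uint64_t b,
                                uint64_t *lo, uint64_t *hi)
    {
        uint64_t a0 = (uint32_t)a, a1 = a >> 32;
        uint64_t b0 = (uint32_t)b, b1 = b >> 32;
        uint64_t p00 = a0 * b0, p01 = a0 * b1, p10 = a1 * b0, p11 = a1 * b1;
        uint64_t mid = (p00 >> 32) + (uint32_t)p01 + (uint32_t)p10;

        *lo = (mid << 32) | (uint32_t)p00;
        *hi = p11 + (p01 >> 32) + (p10 >> 32) + (mid >> 32);
    }

    int main(void)
    {
        uint64_t a = 0xdeadbeefcafebabeULL, b = 0x0123456789abcdefULL;
        uint64_t lo, hi;
        unsigned __int128 ref = (unsigned __int128)a * b;

        mul64_by_halves(a, b, &lo, &hi);
        printf("halves: hi=%016llx lo=%016llx\n",
               (unsigned long long)hi, (unsigned long long)lo);
        printf("match:  %s\n",
               (lo == (uint64_t)ref && hi == (uint64_t)(ref >> 64))
               ? "ok" : "BROKEN");
        return 0;
    }

Delegating this to a single helper call keeps the carry handling out of target code, which is the whole of the cleanup in the patch above.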
1
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
1
Initialize x with accumulated via direct assignment,
2
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
2
rather than multiplying by 1.
3
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
---
6
meson.build | 8 +-------
7
target/hexagon/fma_emu.c | 2 +-
7
tcg/meson.build | 13 +++++++++++++
8
1 file changed, 1 insertion(+), 1 deletion(-)
8
2 files changed, 14 insertions(+), 7 deletions(-)
9
create mode 100644 tcg/meson.build
10
9
11
diff --git a/meson.build b/meson.build
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
12
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
13
--- a/meson.build
12
--- a/target/hexagon/fma_emu.c
14
+++ b/meson.build
13
+++ b/target/hexagon/fma_emu.c
15
@@ -XXX,XX +XXX,XX @@ common_ss.add(capstone)
14
@@ -XXX,XX +XXX,XX @@ float64 internal_mpyhh(float64 a, float64 b,
16
specific_ss.add(files('cpu.c', 'disas.c', 'gdbstub.c'), capstone)
15
float64_is_infinity(b)) {
17
specific_ss.add(when: 'CONFIG_TCG', if_true: files(
16
return float64_mul(a, b, fp_status);
18
'fpu/softfloat.c',
17
}
19
- 'tcg/optimize.c',
18
- x.mant = int128_mul_6464(accumulated, 1);
20
- 'tcg/tcg-common.c',
19
+ x.mant = int128_make64(accumulated);
21
- 'tcg/tcg-op-gvec.c',
20
x.sticky = sticky;
22
- 'tcg/tcg-op-vec.c',
21
prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
23
- 'tcg/tcg-op.c',
22
x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
24
- 'tcg/tcg.c',
25
))
26
-specific_ss.add(when: 'CONFIG_TCG_INTERPRETER', if_true: files('tcg/tci.c'))
27
28
# Work around a gcc bug/misfeature wherein constant propagation looks
29
# through an alias:
30
@@ -XXX,XX +XXX,XX @@ subdir('net')
31
subdir('replay')
32
subdir('semihosting')
33
subdir('hw')
34
+subdir('tcg')
35
subdir('accel')
36
subdir('plugins')
37
subdir('bsd-user')
38
diff --git a/tcg/meson.build b/tcg/meson.build
39
new file mode 100644
40
index XXXXXXX..XXXXXXX
41
--- /dev/null
42
+++ b/tcg/meson.build
43
@@ -XXX,XX +XXX,XX @@
44
+tcg_ss = ss.source_set()
45
+
46
+tcg_ss.add(files(
47
+ 'optimize.c',
48
+ 'tcg.c',
49
+ 'tcg-common.c',
50
+ 'tcg-op.c',
51
+ 'tcg-op-gvec.c',
52
+ 'tcg-op-vec.c',
53
+))
54
+tcg_ss.add(when: 'CONFIG_TCG_INTERPRETER', if_true: files('tci.c'))
55
+
56
+specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
57
--
23
--
58
2.25.1
24
2.43.0
59
60
diff view generated by jsdifflib
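Regarding the "direct assignment rather than multiplying by 1" cleanup above: for a zero-extended 64-bit value, multiplying by 1 and simply forming {lo = value, hi = 0} are the same thing, so int128_make64() states the intent directly. A tiny sketch under the same __int128 assumption as the previous example; the Demo* names are illustrative, not QEMU's Int128 API.

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint64_t lo, hi; } DemoInt128;

    static DemoInt128 demo_int128_make64(uint64_t a)
    {
        return (DemoInt128){ .lo = a, .hi = 0 };
    }

    static DemoInt128 demo_int128_mul_6464(uint64_t a, uint64_t b)
    {
        unsigned __int128 p = (unsigned __int128)a * b;
        return (DemoInt128){ .lo = (uint64_t)p, .hi = (uint64_t)(p >> 64) };
    }

    int main(void)
    {
        uint64_t accumulated = 0xfedcba9876543210ULL;
        DemoInt128 x = demo_int128_make64(accumulated);
        DemoInt128 y = demo_int128_mul_6464(accumulated, 1);

        printf("make64:   hi=%016llx lo=%016llx\n",
               (unsigned long long)x.hi, (unsigned long long)x.lo);
        printf("mul_6464: hi=%016llx lo=%016llx\n",
               (unsigned long long)y.hi, (unsigned long long)y.lo);
        return 0;
    }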
1
These variables belong to the jit side, not the user side.
1
Convert all targets simultaneously, as the gen_intermediate_code
2
function disappears from the target. While there are possible
3
workarounds, they're larger than simply performing the conversion.
2
4
3
Since tcg_init_ctx is no longer used outside of tcg/, move
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
the declaration to tcg-internal.h.
5
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
8
Suggested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
7
---
11
include/tcg/tcg.h | 1 -
8
include/exec/translator.h | 14 --------------
12
tcg/tcg-internal.h | 1 +
9
include/hw/core/tcg-cpu-ops.h | 13 +++++++++++++
13
accel/tcg/translate-all.c | 3 ---
10
target/alpha/cpu.h | 2 ++
14
tcg/tcg.c | 3 +++
11
target/arm/internals.h | 2 ++
15
4 files changed, 4 insertions(+), 4 deletions(-)
12
target/avr/cpu.h | 2 ++
13
target/hexagon/cpu.h | 2 ++
14
target/hppa/cpu.h | 2 ++
15
target/i386/tcg/helper-tcg.h | 2 ++
16
target/loongarch/internals.h | 2 ++
17
target/m68k/cpu.h | 2 ++
18
target/microblaze/cpu.h | 2 ++
19
target/mips/tcg/tcg-internal.h | 2 ++
20
target/openrisc/cpu.h | 2 ++
21
target/ppc/cpu.h | 2 ++
22
target/riscv/cpu.h | 3 +++
23
target/rx/cpu.h | 2 ++
24
target/s390x/s390x-internal.h | 2 ++
25
target/sh4/cpu.h | 2 ++
26
target/sparc/cpu.h | 2 ++
27
target/tricore/cpu.h | 2 ++
28
target/xtensa/cpu.h | 2 ++
29
accel/tcg/cpu-exec.c | 8 +++++---
30
accel/tcg/translate-all.c | 8 +++++---
31
target/alpha/cpu.c | 1 +
32
target/alpha/translate.c | 4 ++--
33
target/arm/cpu.c | 1 +
34
target/arm/tcg/cpu-v7m.c | 1 +
35
target/arm/tcg/translate.c | 5 ++---
36
target/avr/cpu.c | 1 +
37
target/avr/translate.c | 6 +++---
38
target/hexagon/cpu.c | 1 +
39
target/hexagon/translate.c | 4 ++--
40
target/hppa/cpu.c | 1 +
41
target/hppa/translate.c | 4 ++--
42
target/i386/tcg/tcg-cpu.c | 1 +
43
target/i386/tcg/translate.c | 5 ++---
44
target/loongarch/cpu.c | 1 +
45
target/loongarch/tcg/translate.c | 4 ++--
46
target/m68k/cpu.c | 1 +
47
target/m68k/translate.c | 4 ++--
48
target/microblaze/cpu.c | 1 +
49
target/microblaze/translate.c | 4 ++--
50
target/mips/cpu.c | 1 +
51
target/mips/tcg/translate.c | 4 ++--
52
target/openrisc/cpu.c | 1 +
53
target/openrisc/translate.c | 4 ++--
54
target/ppc/cpu_init.c | 1 +
55
target/ppc/translate.c | 4 ++--
56
target/riscv/tcg/tcg-cpu.c | 1 +
57
target/riscv/translate.c | 4 ++--
58
target/rx/cpu.c | 1 +
59
target/rx/translate.c | 4 ++--
60
target/s390x/cpu.c | 1 +
61
target/s390x/tcg/translate.c | 4 ++--
62
target/sh4/cpu.c | 1 +
63
target/sh4/translate.c | 4 ++--
64
target/sparc/cpu.c | 1 +
65
target/sparc/translate.c | 4 ++--
66
target/tricore/cpu.c | 1 +
67
target/tricore/translate.c | 5 ++---
68
target/xtensa/cpu.c | 1 +
69
target/xtensa/translate.c | 4 ++--
70
62 files changed, 121 insertions(+), 62 deletions(-)
16
71
17
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
72
diff --git a/include/exec/translator.h b/include/exec/translator.h
18
index XXXXXXX..XXXXXXX 100644
73
index XXXXXXX..XXXXXXX 100644
19
--- a/include/tcg/tcg.h
74
--- a/include/exec/translator.h
20
+++ b/include/tcg/tcg.h
75
+++ b/include/exec/translator.h
21
@@ -XXX,XX +XXX,XX @@ static inline bool temp_readonly(TCGTemp *ts)
76
@@ -XXX,XX +XXX,XX @@
22
return ts->kind >= TEMP_FIXED;
77
#include "qemu/bswap.h"
78
#include "exec/vaddr.h"
79
80
-/**
81
- * gen_intermediate_code
82
- * @cpu: cpu context
83
- * @tb: translation block
84
- * @max_insns: max number of instructions to translate
85
- * @pc: guest virtual program counter address
86
- * @host_pc: host physical program counter address
87
- *
88
- * This function must be provided by the target, which should create
89
- * the target-specific DisasContext, and then invoke translator_loop.
90
- */
91
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
92
- vaddr pc, void *host_pc);
93
-
94
/**
95
* DisasJumpType:
96
* @DISAS_NEXT: Next instruction in program order.
97
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
98
index XXXXXXX..XXXXXXX 100644
99
--- a/include/hw/core/tcg-cpu-ops.h
100
+++ b/include/hw/core/tcg-cpu-ops.h
101
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
102
* Called when the first CPU is realized.
103
*/
104
void (*initialize)(void);
105
+ /**
106
+ * @translate_code: Translate guest instructions to TCGOps
107
+ * @cpu: cpu context
108
+ * @tb: translation block
109
+ * @max_insns: max number of instructions to translate
110
+ * @pc: guest virtual program counter address
111
+ * @host_pc: host physical program counter address
112
+ *
113
+ * This function must be provided by the target, which should create
114
+ * the target-specific DisasContext, and then invoke translator_loop.
115
+ */
116
+ void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
117
+ int *max_insns, vaddr pc, void *host_pc);
118
/**
119
* @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
120
*
121
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
122
index XXXXXXX..XXXXXXX 100644
123
--- a/target/alpha/cpu.h
124
+++ b/target/alpha/cpu.h
125
@@ -XXX,XX +XXX,XX @@ enum {
126
};
127
128
void alpha_translate_init(void);
129
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
130
+ int *max_insns, vaddr pc, void *host_pc);
131
132
#define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
133
134
diff --git a/target/arm/internals.h b/target/arm/internals.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/target/arm/internals.h
137
+++ b/target/arm/internals.h
138
@@ -XXX,XX +XXX,XX @@ void init_cpreg_list(ARMCPU *cpu);
139
140
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
141
void arm_translate_init(void);
142
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
143
+ int *max_insns, vaddr pc, void *host_pc);
144
145
void arm_cpu_register_gdb_commands(ARMCPU *cpu);
146
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
147
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
148
index XXXXXXX..XXXXXXX 100644
149
--- a/target/avr/cpu.h
150
+++ b/target/avr/cpu.h
151
@@ -XXX,XX +XXX,XX @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
23
}
152
}
24
153
25
-extern TCGContext tcg_init_ctx;
154
void avr_cpu_tcg_init(void);
26
extern __thread TCGContext *tcg_ctx;
155
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
27
extern const void *tcg_code_gen_epilogue;
156
+ int *max_insns, vaddr pc, void *host_pc);
28
extern uintptr_t tcg_splitwx_diff;
157
29
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
158
int cpu_avr_exec(CPUState *cpu);
30
index XXXXXXX..XXXXXXX 100644
159
31
--- a/tcg/tcg-internal.h
160
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
32
+++ b/tcg/tcg-internal.h
161
index XXXXXXX..XXXXXXX 100644
162
--- a/target/hexagon/cpu.h
163
+++ b/target/hexagon/cpu.h
164
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
165
typedef HexagonCPU ArchCPU;
166
167
void hexagon_translate_init(void);
168
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
169
+ int *max_insns, vaddr pc, void *host_pc);
170
171
#include "exec/cpu-all.h"
172
173
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/hppa/cpu.h
176
+++ b/target/hppa/cpu.h
177
@@ -XXX,XX +XXX,XX @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
178
}
179
180
void hppa_translate_init(void);
181
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
182
+ int *max_insns, vaddr pc, void *host_pc);
183
184
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
185
186
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
187
index XXXXXXX..XXXXXXX 100644
188
--- a/target/i386/tcg/helper-tcg.h
189
+++ b/target/i386/tcg/helper-tcg.h
190
@@ -XXX,XX +XXX,XX @@ static inline target_long lshift(target_long x, int n)
191
192
/* translate.c */
193
void tcg_x86_init(void);
194
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
195
+ int *max_insns, vaddr pc, void *host_pc);
196
197
/* excp_helper.c */
198
G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
199
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
200
index XXXXXXX..XXXXXXX 100644
201
--- a/target/loongarch/internals.h
202
+++ b/target/loongarch/internals.h
33
@@ -XXX,XX +XXX,XX @@
203
@@ -XXX,XX +XXX,XX @@
34
204
#define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)
35
#define TCG_HIGHWATER 1024
205
36
206
void loongarch_translate_init(void);
37
+extern TCGContext tcg_init_ctx;
207
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
38
extern TCGContext **tcg_ctxs;
208
+ int *max_insns, vaddr pc, void *host_pc);
39
extern unsigned int tcg_cur_ctxs;
209
40
extern unsigned int tcg_max_ctxs;
210
void G_NORETURN do_raise_exception(CPULoongArchState *env,
211
uint32_t exception,
212
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
213
index XXXXXXX..XXXXXXX 100644
214
--- a/target/m68k/cpu.h
215
+++ b/target/m68k/cpu.h
216
@@ -XXX,XX +XXX,XX @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
217
int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
218
219
void m68k_tcg_init(void);
220
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
221
+ int *max_insns, vaddr pc, void *host_pc);
222
void m68k_cpu_init_gdb(M68kCPU *cpu);
223
uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
224
void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
225
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
226
index XXXXXXX..XXXXXXX 100644
227
--- a/target/microblaze/cpu.h
228
+++ b/target/microblaze/cpu.h
229
@@ -XXX,XX +XXX,XX @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
230
}
231
232
void mb_tcg_init(void);
233
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
234
+ int *max_insns, vaddr pc, void *host_pc);
235
236
#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU
237
238
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
239
index XXXXXXX..XXXXXXX 100644
240
--- a/target/mips/tcg/tcg-internal.h
241
+++ b/target/mips/tcg/tcg-internal.h
242
@@ -XXX,XX +XXX,XX @@
243
#include "cpu.h"
244
245
void mips_tcg_init(void);
246
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
247
+ int *max_insns, vaddr pc, void *host_pc);
248
249
void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
250
G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
251
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
252
index XXXXXXX..XXXXXXX 100644
253
--- a/target/openrisc/cpu.h
254
+++ b/target/openrisc/cpu.h
255
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
256
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
257
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
258
void openrisc_translate_init(void);
259
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
260
+ int *max_insns, vaddr pc, void *host_pc);
261
int print_insn_or1k(bfd_vma addr, disassemble_info *info);
262
263
#ifndef CONFIG_USER_ONLY
264
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
265
index XXXXXXX..XXXXXXX 100644
266
--- a/target/ppc/cpu.h
267
+++ b/target/ppc/cpu.h
268
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ppc_cpu;
269
270
/*****************************************************************************/
271
void ppc_translate_init(void);
272
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
273
+ int *max_insns, vaddr pc, void *host_pc);
274
275
#if !defined(CONFIG_USER_ONLY)
276
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
277
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
278
index XXXXXXX..XXXXXXX 100644
279
--- a/target/riscv/cpu.h
280
+++ b/target/riscv/cpu.h
281
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
282
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
283
284
void riscv_translate_init(void);
285
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
286
+ int *max_insns, vaddr pc, void *host_pc);
287
+
288
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
289
uint32_t exception, uintptr_t pc);
290
291
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
292
index XXXXXXX..XXXXXXX 100644
293
--- a/target/rx/cpu.h
294
+++ b/target/rx/cpu.h
295
@@ -XXX,XX +XXX,XX @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
296
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
297
298
void rx_translate_init(void);
299
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
300
+ int *max_insns, vaddr pc, void *host_pc);
301
void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
302
303
#include "exec/cpu-all.h"
304
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
305
index XXXXXXX..XXXXXXX 100644
306
--- a/target/s390x/s390x-internal.h
307
+++ b/target/s390x/s390x-internal.h
308
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
309
310
/* translate.c */
311
void s390x_translate_init(void);
312
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
313
+ int *max_insns, vaddr pc, void *host_pc);
314
void s390x_restore_state_to_opc(CPUState *cs,
315
const TranslationBlock *tb,
316
const uint64_t *data);
317
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
318
index XXXXXXX..XXXXXXX 100644
319
--- a/target/sh4/cpu.h
320
+++ b/target/sh4/cpu.h
321
@@ -XXX,XX +XXX,XX @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
322
uintptr_t retaddr);
323
324
void sh4_translate_init(void);
325
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
326
+ int *max_insns, vaddr pc, void *host_pc);
327
328
#if !defined(CONFIG_USER_ONLY)
329
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
330
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
331
index XXXXXXX..XXXXXXX 100644
332
--- a/target/sparc/cpu.h
333
+++ b/target/sparc/cpu.h
334
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
335
336
/* translate.c */
337
void sparc_tcg_init(void);
338
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
339
+ int *max_insns, vaddr pc, void *host_pc);
340
341
/* fop_helper.c */
342
target_ulong cpu_get_fsr(CPUSPARCState *);
343
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
344
index XXXXXXX..XXXXXXX 100644
345
--- a/target/tricore/cpu.h
346
+++ b/target/tricore/cpu.h
347
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)
348
349
void cpu_state_reset(CPUTriCoreState *s);
350
void tricore_tcg_init(void);
351
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
352
+ int *max_insns, vaddr pc, void *host_pc);
353
354
static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
355
uint64_t *cs_base, uint32_t *flags)
356
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
357
index XXXXXXX..XXXXXXX 100644
358
--- a/target/xtensa/cpu.h
359
+++ b/target/xtensa/cpu.h
360
@@ -XXX,XX +XXX,XX @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
361
362
void xtensa_collect_sr_names(const XtensaConfig *config);
363
void xtensa_translate_init(void);
364
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
365
+ int *max_insns, vaddr pc, void *host_pc);
366
void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
367
void xtensa_breakpoint_handler(CPUState *cs);
368
void xtensa_register_core(XtensaConfigList *node);
369
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
370
index XXXXXXX..XXXXXXX 100644
371
--- a/accel/tcg/cpu-exec.c
372
+++ b/accel/tcg/cpu-exec.c
373
@@ -XXX,XX +XXX,XX @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
374
375
if (!tcg_target_initialized) {
376
/* Check mandatory TCGCPUOps handlers */
377
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
378
#ifndef CONFIG_USER_ONLY
379
- assert(cpu->cc->tcg_ops->cpu_exec_halt);
380
- assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
381
+ assert(tcg_ops->cpu_exec_halt);
382
+ assert(tcg_ops->cpu_exec_interrupt);
383
#endif /* !CONFIG_USER_ONLY */
384
- cpu->cc->tcg_ops->initialize();
385
+ assert(tcg_ops->translate_code);
386
+ tcg_ops->initialize();
387
tcg_target_initialized = true;
388
}
389
41
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static int v_l2_levels;
 
 static void *l1_map[V_L1_MAX_SIZE];
 
-/* code generation context */
-TCGContext tcg_init_ctx;
-__thread TCGContext *tcg_ctx;
 TBContext tb_ctx;
 
 static void page_table_config_init(void)
diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
 static int tcg_out_ldst_finalize(TCGContext *s);
 #endif
 
+TCGContext tcg_init_ctx;
+__thread TCGContext *tcg_ctx;
+
 TCGContext **tcg_ctxs;
 unsigned int tcg_cur_ctxs;
 unsigned int tcg_max_ctxs;
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
 
     tcg_func_start(tcg_ctx);
 
-    tcg_ctx->cpu = env_cpu(env);
-    gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
+    CPUState *cs = env_cpu(env);
+    tcg_ctx->cpu = cs;
+    cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);
+
     assert(tb->size != 0);
     tcg_ctx->cpu = NULL;
     *max_insns = tb->icount;
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     /*
      * Overflow of code_gen_buffer, or the current slice of it.
      *
-     * TODO: We don't need to re-do gen_intermediate_code, nor
+     * TODO: We don't need to re-do tcg_ops->translate_code, nor
      * should we re-do the tcg optimization currently hidden
      * inside tcg_gen_code. All that should be required is to
      * flush the TBs, allocate a new TB, re-initialize it per
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
 
 static const TCGCPUOps alpha_tcg_ops = {
     .initialize = alpha_translate_init,
+    .translate_code = alpha_translate_code,
     .synchronize_from_tb = alpha_cpu_synchronize_from_tb,
     .restore_state_to_opc = alpha_restore_state_to_opc,
 
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
     .tb_stop = alpha_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc;
     translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps arm_sysemu_ops = {
 #ifdef CONFIG_TCG
 static const TCGCPUOps arm_tcg_ops = {
     .initialize = arm_translate_init,
+    .translate_code = arm_translate_code,
     .synchronize_from_tb = arm_cpu_synchronize_from_tb,
     .debug_excp_handler = arm_debug_excp_handler,
     .restore_state_to_opc = arm_restore_state_to_opc,
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/cpu-v7m.c
+++ b/target/arm/tcg/cpu-v7m.c
@@ -XXX,XX +XXX,XX @@ static void cortex_m55_initfn(Object *obj)
 
 static const TCGCPUOps arm_v7m_tcg_ops = {
     .initialize = arm_translate_init,
+    .translate_code = arm_translate_code,
     .synchronize_from_tb = arm_cpu_synchronize_from_tb,
     .debug_excp_handler = arm_debug_excp_handler,
     .restore_state_to_opc = arm_restore_state_to_opc,
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
     .tb_stop = arm_tr_tb_stop,
 };
 
-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc = { };
     const TranslatorOps *ops = &arm_translator_ops;
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {
 
 static const TCGCPUOps avr_tcg_ops = {
     .initialize = avr_cpu_tcg_init,
+    .translate_code = avr_cpu_translate_code,
     .synchronize_from_tb = avr_cpu_synchronize_from_tb,
     .restore_state_to_opc = avr_restore_state_to_opc,
     .cpu_exec_interrupt = avr_cpu_exec_interrupt,
diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
  *
  * - translate()
  * - canonicalize_skip()
- * - gen_intermediate_code()
+ * - translate_code()
  * - restore_state_to_opc()
  *
  */
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
     .tb_stop = avr_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc = { };
     translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/cpu.c
+++ b/target/hexagon/cpu.c
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)
 
 static const TCGCPUOps hexagon_tcg_ops = {
     .initialize = hexagon_translate_init,
+    .translate_code = hexagon_translate_code,
     .synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
     .restore_state_to_opc = hexagon_restore_state_to_opc,
 };
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
     .tb_stop = hexagon_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;
 
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
 
 static const TCGCPUOps hppa_tcg_ops = {
     .initialize = hppa_translate_init,
+    .translate_code = hppa_translate_code,
     .synchronize_from_tb = hppa_cpu_synchronize_from_tb,
     .restore_state_to_opc = hppa_restore_state_to_opc,
 
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
 #endif
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
+                         int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx = { };
     translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)
 
 static const TCGCPUOps x86_tcg_ops = {
     .initialize = tcg_x86_init,
+    .translate_code = x86_translate_code,
     .synchronize_from_tb = x86_cpu_synchronize_from_tb,
     .restore_state_to_opc = x86_restore_state_to_opc,
     .cpu_exec_enter = x86_cpu_exec_enter,
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
     .tb_stop = i386_tr_tb_stop,
 };
 
-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc;
 
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
 
 static const TCGCPUOps loongarch_tcg_ops = {
     .initialize = loongarch_translate_init,
+    .translate_code = loongarch_translate_code,
     .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
     .restore_state_to_opc = loongarch_restore_state_to_opc,
 
diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/tcg/translate.c
+++ b/target/loongarch/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
     .tb_stop = loongarch_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
+                              int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;
 
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {
 
 static const TCGCPUOps m68k_tcg_ops = {
     .initialize = m68k_tcg_init,
+    .translate_code = m68k_translate_code,
     .restore_state_to_opc = m68k_restore_state_to_opc,
 
 #ifndef CONFIG_USER_ONLY
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
     .tb_stop = m68k_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
+                         int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc;
     translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {
 
 static const TCGCPUOps mb_tcg_ops = {
     .initialize = mb_tcg_init,
+    .translate_code = mb_translate_code,
     .synchronize_from_tb = mb_cpu_synchronize_from_tb,
     .restore_state_to_opc = mb_restore_state_to_opc,
 
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
     .tb_stop = mb_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
+                       int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc;
     translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static const Property mips_cpu_properties[] = {
 #include "hw/core/tcg-cpu-ops.h"
 static const TCGCPUOps mips_tcg_ops = {
     .initialize = mips_tcg_init,
+    .translate_code = mips_translate_code,
     .synchronize_from_tb = mips_cpu_synchronize_from_tb,
     .restore_state_to_opc = mips_restore_state_to_opc,
 
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
     .tb_stop = mips_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+                         int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;
 
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {
 
 static const TCGCPUOps openrisc_tcg_ops = {
     .initialize = openrisc_translate_init,
+    .translate_code = openrisc_translate_code,
     .synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
     .restore_state_to_opc = openrisc_restore_state_to_opc,
 
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
     .tb_stop = openrisc_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+                             int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;
 
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {
 
 static const TCGCPUOps ppc_tcg_ops = {
     .initialize = ppc_translate_init,
+    .translate_code = ppc_translate_code,
     .restore_state_to_opc = ppc_restore_state_to_opc,
 
 #ifdef CONFIG_USER_ONLY
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
     .tb_stop = ppc_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;
 
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_restore_state_to_opc(CPUState *cs,
 
 static const TCGCPUOps riscv_tcg_ops = {
     .initialize = riscv_translate_init,
+    .translate_code = riscv_translate_code,
     .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
     .restore_state_to_opc = riscv_restore_state_to_opc,
 
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
     .tb_stop = riscv_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;
 
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {
 
 static const TCGCPUOps rx_tcg_ops = {
     .initialize = rx_translate_init,
+    .translate_code = rx_translate_code,
     .synchronize_from_tb = rx_cpu_synchronize_from_tb,
     .restore_state_to_opc = rx_restore_state_to_opc,
     .tlb_fill = rx_cpu_tlb_fill,
diff --git a/target/rx/translate.c b/target/rx/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
     .tb_stop = rx_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+                       int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc;
 
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
 
 static const TCGCPUOps s390_tcg_ops = {
     .initialize = s390x_translate_init,
+    .translate_code = s390x_translate_code,
     .restore_state_to_opc = s390x_restore_state_to_opc,
 
 #ifdef CONFIG_USER_ONLY
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
     .disas_log = s390x_tr_disas_log,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc;
 
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
 
 static const TCGCPUOps superh_tcg_ops = {
     .initialize = sh4_translate_init,
+    .translate_code = sh4_translate_code,
     .synchronize_from_tb = superh_cpu_synchronize_from_tb,
     .restore_state_to_opc = superh_restore_state_to_opc,
 
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
     .tb_stop = sh4_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;
 
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
 
 static const TCGCPUOps sparc_tcg_ops = {
    .initialize = sparc_tcg_init,
+    .translate_code = sparc_translate_code,
     .synchronize_from_tb = sparc_cpu_synchronize_from_tb,
     .restore_state_to_opc = sparc_restore_state_to_opc,
 
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
     .tb_stop = sparc_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc = {};
 
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {
 
 static const TCGCPUOps tricore_tcg_ops = {
     .initialize = tricore_tcg_init,
+    .translate_code = tricore_translate_code,
     .synchronize_from_tb = tricore_cpu_synchronize_from_tb,
     .restore_state_to_opc = tricore_restore_state_to_opc,
     .tlb_fill = tricore_cpu_tlb_fill,
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
     .tb_stop = tricore_tr_tb_stop,
 };
 
-
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;
     translator_loop(cs, tb, max_insns, pc, host_pc,
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
 
 static const TCGCPUOps xtensa_tcg_ops = {
     .initialize = xtensa_translate_init,
+    .translate_code = xtensa_translate_code,
     .debug_excp_handler = xtensa_breakpoint_handler,
     .restore_state_to_opc = xtensa_restore_state_to_opc,
 
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
     .tb_stop = xtensa_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
+                           int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc = {};
     translator_loop(cpu, tb, max_insns, pc, host_pc,
-- 
2.43.0
