Second pull for this week, since this set is large enough by itself.

r~

The following changes since commit aa3a285b5bc56a4208b3b57d4a55291e9c260107:

  Merge tag 'mem-2024-12-21' of https://github.com/davidhildenbrand/qemu into staging (2024-12-22 14:33:27 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241224

for you to fetch changes up to e4a8e093dc74be049f4829831dce76e5edab0003:

  accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core (2024-12-24 08:32:15 -0800)

----------------------------------------------------------------
tcg/optimize: Remove in-flight mask data from OptContext
fpu: Add float*_muladd_scalbn
fpu: Remove float_muladd_halve_result
fpu: Add float_round_nearest_even_max
fpu: Add float_muladd_suppress_add_product_zero
target/hexagon: Use float32_muladd
accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

----------------------------------------------------------------
Ilya Leoshkevich (1):
      tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

Pierrick Bouvier (1):
      plugins: optimize cpu_index code generation

Richard Henderson (70):
      tcg/optimize: Split out finish_bb, finish_ebb
      tcg/optimize: Split out fold_affected_mask
      tcg/optimize: Copy mask writeback to fold_masks
      tcg/optimize: Split out fold_masks_zs
      tcg/optimize: Augment s_mask from z_mask in fold_masks_zs
      tcg/optimize: Change representation of s_mask
      tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2
      tcg/optimize: Introduce const value accessors for TempOptInfo
      tcg/optimize: Use fold_masks_zs in fold_and
      tcg/optimize: Use fold_masks_zs in fold_andc
      tcg/optimize: Use fold_masks_zs in fold_bswap
      tcg/optimize: Use fold_masks_zs in fold_count_zeros
      tcg/optimize: Use fold_masks_z in fold_ctpop
      tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
      tcg/optimize: Compute sign mask in fold_deposit
      tcg/optimize: Use finish_folding in fold_divide
      tcg/optimize: Use finish_folding in fold_dup, fold_dup2
      tcg/optimize: Use fold_masks_s in fold_eqv
      tcg/optimize: Use fold_masks_z in fold_extract
      tcg/optimize: Use finish_folding in fold_extract2
      tcg/optimize: Use fold_masks_zs in fold_exts
      tcg/optimize: Use fold_masks_z in fold_extu
      tcg/optimize: Use fold_masks_zs in fold_movcond
      tcg/optimize: Use finish_folding in fold_mul*
      tcg/optimize: Use fold_masks_s in fold_nand
      tcg/optimize: Use fold_masks_z in fold_neg_no_const
      tcg/optimize: Use fold_masks_s in fold_nor
      tcg/optimize: Use fold_masks_s in fold_not
      tcg/optimize: Use fold_masks_zs in fold_or
      tcg/optimize: Use fold_masks_zs in fold_orc
      tcg/optimize: Use fold_masks_zs in fold_qemu_ld
      tcg/optimize: Return true from fold_qemu_st, fold_tcg_st
      tcg/optimize: Use finish_folding in fold_remainder
      tcg/optimize: Distinguish simplification in fold_setcond_zmask
      tcg/optimize: Use fold_masks_z in fold_setcond
      tcg/optimize: Use fold_masks_s in fold_negsetcond
      tcg/optimize: Use fold_masks_z in fold_setcond2
      tcg/optimize: Use finish_folding in fold_cmp_vec
      tcg/optimize: Use finish_folding in fold_cmpsel_vec
      tcg/optimize: Use fold_masks_zs in fold_sextract
      tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
      tcg/optimize: Simplify sign bit test in fold_shift
      tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec
      tcg/optimize: Use fold_masks_zs in fold_tcg_ld
      tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy
      tcg/optimize: Use fold_masks_zs in fold_xor
      tcg/optimize: Use finish_folding in fold_bitsel_vec
      tcg/optimize: Use finish_folding as default in tcg_optimize
      tcg/optimize: Remove z_mask, s_mask from OptContext
      tcg/optimize: Re-enable sign-mask optimizations
      tcg/optimize: Move fold_bitsel_vec into alphabetic sort
      tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
      softfloat: Add float{16,32,64}_muladd_scalbn
      target/arm: Use float*_muladd_scalbn
      target/sparc: Use float*_muladd_scalbn
      softfloat: Remove float_muladd_halve_result
      softfloat: Add float_round_nearest_even_max
      softfloat: Add float_muladd_suppress_add_product_zero
      target/hexagon: Use float32_mul in helper_sfmpy
      target/hexagon: Use float32_muladd for helper_sffma
      target/hexagon: Use float32_muladd for helper_sffms
      target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc
      target/hexagon: Use float32_muladd for helper_sffm[as]_lib
      target/hexagon: Remove internal_fmafx
      target/hexagon: Expand GEN_XF_ROUND
      target/hexagon: Remove Float
      target/hexagon: Remove Double
      target/hexagon: Use mulu64 for int128_mul_6464
      target/hexagon: Simplify internal_mpyhh setup
      accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

 include/exec/translator.h | 14 -
 include/fpu/softfloat-types.h | 2 +
 include/fpu/softfloat.h | 14 +-
 include/hw/core/tcg-cpu-ops.h | 13 +
 target/alpha/cpu.h | 2 +
 target/arm/internals.h | 2 +
 target/avr/cpu.h | 2 +
 target/hexagon/cpu.h | 2 +
 target/hexagon/fma_emu.h | 3 -
 target/hppa/cpu.h | 2 +
 target/i386/tcg/helper-tcg.h | 2 +
 target/loongarch/internals.h | 2 +
 target/m68k/cpu.h | 2 +
 target/microblaze/cpu.h | 2 +
 target/mips/tcg/tcg-internal.h | 2 +
 target/openrisc/cpu.h | 2 +
 target/ppc/cpu.h | 2 +
 target/riscv/cpu.h | 3 +
 target/rx/cpu.h | 2 +
 target/s390x/s390x-internal.h | 2 +
 target/sh4/cpu.h | 2 +
 target/sparc/cpu.h | 2 +
 target/sparc/helper.h | 4 +-
 target/tricore/cpu.h | 2 +
 target/xtensa/cpu.h | 2 +
 accel/tcg/cpu-exec.c | 8 +-
 accel/tcg/plugin-gen.c | 9 +
 accel/tcg/translate-all.c | 8 +-
 fpu/softfloat.c | 63 +--
 target/alpha/cpu.c | 1 +
 target/alpha/translate.c | 4 +-
 target/arm/cpu.c | 1 +
 target/arm/tcg/cpu-v7m.c | 1 +
 target/arm/tcg/helper-a64.c | 6 +-
 target/arm/tcg/translate.c | 5 +-
 target/avr/cpu.c | 1 +
 target/avr/translate.c | 6 +-
 target/hexagon/cpu.c | 1 +
 target/hexagon/fma_emu.c | 496 ++++++---------------
 target/hexagon/op_helper.c | 125 ++----
 target/hexagon/translate.c | 4 +-
 target/hppa/cpu.c | 1 +
 target/hppa/translate.c | 4 +-
 target/i386/tcg/tcg-cpu.c | 1 +
 target/i386/tcg/translate.c | 5 +-
 target/loongarch/cpu.c | 1 +
 target/loongarch/tcg/translate.c | 4 +-
 target/m68k/cpu.c | 1 +
 target/m68k/translate.c | 4 +-
 target/microblaze/cpu.c | 1 +
 target/microblaze/translate.c | 4 +-
 target/mips/cpu.c | 1 +
 target/mips/tcg/translate.c | 4 +-
 target/openrisc/cpu.c | 1 +
 target/openrisc/translate.c | 4 +-
 target/ppc/cpu_init.c | 1 +
 target/ppc/translate.c | 4 +-
 target/riscv/tcg/tcg-cpu.c | 1 +
 target/riscv/translate.c | 4 +-
 target/rx/cpu.c | 1 +
 target/rx/translate.c | 4 +-
 target/s390x/cpu.c | 1 +
 target/s390x/tcg/translate.c | 4 +-
 target/sh4/cpu.c | 1 +
 target/sh4/translate.c | 4 +-
 target/sparc/cpu.c | 1 +
 target/sparc/fop_helper.c | 8 +-
 target/sparc/translate.c | 84 ++--
 target/tricore/cpu.c | 1 +
 target/tricore/translate.c | 5 +-
 target/xtensa/cpu.c | 1 +
 target/xtensa/translate.c | 4 +-
 tcg/optimize.c | 857 +++++++++++++++++++-----------------
 tests/tcg/multiarch/system/memory.c | 9 +-
 fpu/softfloat-parts.c.inc | 16 +-
 75 files changed, 866 insertions(+), 1009 deletions(-)
New patch
From: Ilya Leoshkevich <iii@linux.ibm.com>

make check-tcg fails on Fedora with the following error message:

  alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
  qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
     17 | #include <inttypes.h>
        |          ^~~~~~~~~~~~
  compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
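[Not part of the patch -- an illustrative aside. The same substitution in
a standalone, hosted form; plain printf stands in for the test's
ml_printf:]

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        static uint8_t test_data[16];
        uint32_t test_read_count = 0;

        /* Cast to unsigned long and use %lx / %lu so that neither
           PRIxPTR nor PRId32 (and hence <inttypes.h>) is required. */
        printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
        printf("Test data read: %lu\n", (unsigned long)test_read_count);
        return 0;
    }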
 tests/tcg/multiarch/system/memory.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -XXX,XX +XXX,XX @@
 
 #include <stdint.h>
 #include <stdbool.h>
-#include <inttypes.h>
 #include <minilib.h>
 
 #ifndef CHECK_UNALIGNED
@@ -XXX,XX +XXX,XX @@ int main(void)
     int i;
     bool ok = true;
 
-    ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
-    ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+    ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+    ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);
 
     /* Run through the unsigned tests first */
     for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -XXX,XX +XXX,XX @@ int main(void)
         ok = do_signed_reads(true);
     }
 
-    ml_printf("Test data read: %"PRId32"\n", test_read_count);
-    ml_printf("Test data write: %"PRId32"\n", test_write_count);
+    ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+    ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
     ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
     return ok ? 0 : -1;
 }
-- 
2.43.0
New patch
From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
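[Not part of the patch -- an illustrative aside. Why a constant cpu_index
helps scoreboard access, sketched with a hypothetical scoreboard layout
rather than the real plugin API:]

    #include <stdint.h>

    typedef struct { uint64_t insn_count; } ScoreboardEntry;

    /* With cpu_index known at translation time (the single-vcpu case),
       the element address is a constant and no per-access index
       arithmetic needs to be generated. */
    static uint64_t *counter_for(ScoreboardEntry *sb, unsigned int cpu_index)
    {
        return &sb[cpu_index].insn_count;
    }

    int main(void)
    {
        static ScoreboardEntry sb[1];
        *counter_for(sb, 0) += 1;   /* folds to a direct access to sb[0] */
        return 0;
    }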
 accel/tcg/plugin-gen.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void gen_disable_mem_helper(void)
 
 static TCGv_i32 gen_cpu_index(void)
 {
+    /*
+     * Optimize when we run with a single vcpu. All values using cpu_index,
+     * including scoreboard index, will be optimized out.
+     * User-mode calls tb_flush when setting this flag. In system-mode, all
+     * vcpus are created before generating code.
+     */
+    if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+        return tcg_constant_i32(current_cpu->cpu_index);
+    }
     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
     tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
-- 
2.43.0
Call them directly from the opcode switch statement in tcg_optimize,
rather than in finish_folding based on opcode flags.  Adjust folding
of conditional branches to match.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 47 +++++++++++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
     }
 }
 
+static void finish_bb(OptContext *ctx)
+{
+    /* We only optimize memory barriers across basic blocks. */
+    ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+    finish_bb(ctx);
+    /* We only optimize across extended basic blocks. */
+    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+    remove_mem_copy_all(ctx);
+}
+
 static void finish_folding(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     int i, nb_oargs;
 
-    /*
-     * We only optimize extended basic blocks.  If the opcode ends a BB
-     * and is not a conditional branch, reset all temp data.
-     */
-    if (def->flags & TCG_OPF_BB_END) {
-        ctx->prev_mb = NULL;
-        if (!(def->flags & TCG_OPF_COND_BRANCH)) {
-            memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
-            remove_mem_copy_all(ctx);
-        }
-        return;
-    }
-
     nb_oargs = def->nb_oargs;
     for (i = 0; i < nb_oargs; i++) {
         TCGTemp *ts = arg_temp(op->args[i]);
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
     if (i > 0) {
         op->opc = INDEX_op_br;
         op->args[0] = op->args[3];
+        finish_ebb(ctx);
+    } else {
+        finish_bb(ctx);
     }
-    return false;
+    return true;
 }
 
 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
         }
         op->opc = INDEX_op_br;
         op->args[0] = label;
-        break;
+        finish_ebb(ctx);
+        return true;
     }
-    return false;
+
+    finish_bb(ctx);
+    return true;
 }
 
 static bool fold_bswap(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(xor):
             done = fold_xor(&ctx, op);
             break;
+        case INDEX_op_set_label:
+        case INDEX_op_br:
+        case INDEX_op_exit_tb:
+        case INDEX_op_goto_tb:
+        case INDEX_op_goto_ptr:
+            finish_ebb(&ctx);
+            done = true;
+            break;
         default:
             break;
         }
-- 
2.43.0
New patch
There are only a few logical operations which can compute
an "affected" mask.  Split out handling of this optimization
to a separate function, only to be called when applicable.

Remove the a_mask field from OptContext, as the mask is
no longer stored anywhere.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
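[Not part of the patch -- a worked example of the "affected" mask in
fold_and, with hypothetical mask values:]

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint64_t z1 = 0x00ff;       /* arg1: only the low 8 bits can be 1 */
        uint64_t z2 = 0x00ff;       /* arg2 is the constant 0xff */
        uint64_t a_mask = z1 & ~z2; /* bits the AND could actually clear */

        /* a_mask == 0: the AND cannot change arg1, so fold_affected_mask()
           replaces the operation with a copy of arg1. */
        assert(a_mask == 0);
        return 0;
    }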
 tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
 
     /* In flight values from optimization. */
-    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
     uint64_t s_mask;  /* mask of clrsb(value) bits */
     TCGType type;
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
 
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
-    uint64_t a_mask = ctx->a_mask;
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
 
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
      * type changing opcodes.
      */
     if (ctx->type == TCG_TYPE_I32) {
-        a_mask = (int32_t)a_mask;
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
         ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
+    return false;
+}
+
+/*
+ * An "affected" mask bit is 0 if and only if the result is identical
+ * to the first input.  Thus if the entire mask is 0, the operation
+ * is equivalent to a copy.
+ */
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
+{
+    if (ctx->type == TCG_TYPE_I32) {
+        a_mask = (uint32_t)a_mask;
+    }
     if (a_mask == 0) {
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
      */
-    if (arg_is_const(op->args[2])) {
-        ctx->a_mask = z1 & ~z2;
+    if (arg_is_const(op->args[2]) &&
+        fold_affected_mask(ctx, op, z1 & ~z2)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
      */
     if (arg_is_const(op->args[2])) {
         uint64_t z2 = ~arg_info(op->args[2])->z_mask;
-        ctx->a_mask = z1 & ~z2;
+        if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+            return true;
+        }
         z1 &= z2;
     }
     ctx->z_mask = z1;
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
 
     z_mask_old = arg_info(op->args[1])->z_mask;
     z_mask = extract64(z_mask_old, pos, len);
-    if (pos == 0) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
-    if (!type_change) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;
 
-    if (pos == 0) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
     }
 
     /* Assume all bits affected, no bits known zero, no sign reps. */
-    ctx.a_mask = -1;
     ctx.z_mask = -1;
     ctx.s_mask = 0;
 
-- 
2.43.0
New patch
Use of fold_masks should be restricted to those opcodes that
can reliably make use of it -- those with a single output,
and from higher-level folders that set up the masks.
Prepare for conversion of each folder in turn.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
+    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    TCGTemp *ts;
+    TempOptInfo *ti;
+
+    /* Only single-output opcodes are supported here. */
+    tcg_debug_assert(def->nb_oargs == 1);
 
     /*
      * 32-bit ops generate 32-bit results, which for the purpose of
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
-        ctx->z_mask = z_mask;
-        ctx->s_mask = s_mask;
     }
 
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
-    return false;
+
+    ts = arg_temp(op->args[0]);
+    reset_ts(ctx, ts);
+
+    ti = ts_info(ts);
+    ti->z_mask = z_mask;
+    ti->s_mask = s_mask;
+    return true;
 }
 
 /*
-- 
2.43.0
New patch
Add a routine to which masks can be passed directly, rather than
storing them into OptContext.  To be used in upcoming patches.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
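[Not part of the patch -- a hypothetical sketch of the call pattern the
later conversions use: a folder computes its masks locally and hands them
straight to fold_masks_zs instead of staging them in OptContext. The
folder name and mask derivation below are placeholders.]

    static bool fold_example(OptContext *ctx, TCGOp *op)
    {
        uint64_t z_mask = arg_info(op->args[1])->z_mask;
        uint64_t s_mask = arg_info(op->args[1])->s_mask;

        return fold_masks_zs(ctx, op, z_mask, s_mask);
    }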
 tcg/optimize.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
-static bool fold_masks(OptContext *ctx, TCGOp *op)
+/*
+ * Record "zero" and "sign" masks for the single output of @op.
+ * See TempOptInfo definition of z_mask and s_mask.
+ * If z_mask allows, fold the output to constant zero.
+ */
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
+                          uint64_t z_mask, uint64_t s_mask)
 {
-    uint64_t z_mask = ctx->z_mask;
-    uint64_t s_mask = ctx->s_mask;
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+    return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
+}
+
 /*
  * An "affected" mask bit is 0 if and only if the result is identical
  * to the first input.  Thus if the entire mask is 0, the operation
-- 
2.43.0
Consider the passed s_mask to be a minimum deduced from
either existing s_mask or from a sign-extension operation.
We may be able to deduce more from the set of known zeros.
Remove identical logic from several opcode folders.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 ++++++---------------
 1 file changed, 6 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * Record "zero" and "sign" masks for the single output of @op.
  * See TempOptInfo definition of z_mask and s_mask.
  * If z_mask allows, fold the output to constant zero.
+ * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                           uint64_t z_mask, uint64_t s_mask)
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 
     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask;
+    ti->s_mask = s_mask | smask_from_zmask(z_mask);
     return true;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    s_mask = smask_from_zmask(z_mask);
 
+    s_mask = 0;
     switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
     case TCG_BSWAP_OZ:
         break;
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
     default:
         /* The high bits are undefined: force all bits above the sign to 1. */
         z_mask |= sign << 1;
-        s_mask = 0;
         break;
     }
     ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }
     ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    ctx->s_mask = smask_from_zmask(ctx->z_mask);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
         return true;
     }
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
 
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = z_mask;
-    ctx->s_mask = smask_from_zmask(z_mask);
     if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
         return true;
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
     int width = 8 * memop_size(mop);
 
     if (width < 64) {
-        ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
-        if (!(mop & MO_SIGN)) {
+        if (mop & MO_SIGN) {
+            ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+        } else {
             ctx->z_mask = MAKE_64BIT_MASK(0, width);
-            ctx->s_mask <<= 1;
         }
     }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
     fold_setcond_tst_pow2(ctx, op, false);
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
     }
 
     ctx->z_mask = 1;
-    ctx->s_mask = smask_from_zmask(1);
     return false;
 
 do_setcond_const:
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
     CASE_OP_32_64(ld8u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 8);
-        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
         break;
     CASE_OP_32_64(ld16s):
         ctx->s_mask = MAKE_64BIT_MASK(16, 48);
         break;
     CASE_OP_32_64(ld16u):
         ctx->z_mask = MAKE_64BIT_MASK(0, 16);
-        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
         break;
     case INDEX_op_ld32s_i64:
         ctx->s_mask = MAKE_64BIT_MASK(32, 32);
         break;
     case INDEX_op_ld32u_i64:
         ctx->z_mask = MAKE_64BIT_MASK(0, 32);
-        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
         break;
     default:
         g_assert_not_reached();
     }
-- 
2.43.0
New patch
Change the representation from sign bit repetitions to all bits equal
to the sign bit, including the sign bit itself.

The previous format has a problem in that it is difficult to recreate
a valid sign mask after a shift operation: the "repetitions" part of
the previous format meant that applying the same shift as for the value
lead to an off-by-one value.

The new format, including the sign bit itself, means that the sign mask
can be manipulated in exactly the same way as the value, canonicalization
is easier.

Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
to do so.  Treat 0 as a non-canonical but typeless input for no sign
information, which will be reset as appropriate for the data type.
We can easily fold in the data from z_mask while canonicalizing.

Temporarily disable optimizations using s_mask while each operation is
converted to use fold_masks_zs and to the new form.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
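[Not part of the patch -- a standalone illustration of the new canonical
form; __builtin_clrsbll stands in for QEMU's clrsb64 helper:]

    #include <stdint.h>
    #include <stdio.h>

    /* New form: a bit is set in s_mask iff that bit of the value is known
       to equal the msb; the msb itself is always included. */
    static uint64_t smask_of(uint64_t value)
    {
        return (uint64_t)(INT64_MIN >> __builtin_clrsbll(value));
    }

    int main(void)
    {
        printf("%016llx\n", (unsigned long long)smask_of(0));
        /* ffffffffffffffff: every bit equals the (zero) sign bit */
        printf("%016llx\n", (unsigned long long)smask_of(0xff));
        /* ffffffffffffff00: bits 63..8 are copies of the sign bit */
        return 0;
    }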
 tcg/optimize.c | 64 ++++++++++++--------------------------------------
 1 file changed, 15 insertions(+), 49 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
     uint64_t val;
     uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
-    uint64_t s_mask;  /* a left-aligned mask of clrsb(value) bits. */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
 } TempOptInfo;
 
 typedef struct OptContext {
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
 
     /* In flight values from optimization. */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
-    uint64_t s_mask;  /* mask of clrsb(value) bits */
+    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
     TCGType type;
 } OptContext;
 
-/* Calculate the smask for a specific value. */
-static uint64_t smask_from_value(uint64_t value)
-{
-    int rep = clrsb64(value);
-    return ~(~0ull >> rep);
-}
-
-/*
- * Calculate the smask for a given set of known-zeros.
- * If there are lots of zeros on the left, we can consider the remainder
- * an unsigned field, and thus the corresponding signed field is one bit
- * larger.
- */
-static uint64_t smask_from_zmask(uint64_t zmask)
-{
-    /*
-     * Only the 0 bits are significant for zmask, thus the msb itself
-     * must be zero, else we have no sign information.
-     */
-    int rep = clz64(zmask);
-    if (rep == 0) {
-        return 0;
-    }
-    rep -= 1;
-    return ~(~0ull >> rep);
-}
-
-/*
- * Recreate a properly left-aligned smask after manipulation.
- * Some bit-shuffling, particularly shifts and rotates, may
- * retain sign bits on the left, but may scatter disconnected
- * sign bits on the right.  Retain only what remains to the left.
- */
-static uint64_t smask_from_smask(int64_t smask)
-{
-    /* Only the 1 bits are significant for smask */
-    return smask_from_zmask(~smask);
-}
-
 static inline TempOptInfo *ts_info(TCGTemp *ts)
 {
     return ts->state_ptr;
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
         ti->is_const = true;
         ti->val = ts->val;
         ti->z_mask = ts->val;
-        ti->s_mask = smask_from_value(ts->val);
+        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
     } else {
         ti->is_const = false;
         ti->z_mask = -1;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
          */
         if (i == 0) {
             ts_info(ts)->z_mask = ctx->z_mask;
-            ts_info(ts)->s_mask = ctx->s_mask;
         }
     }
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
  * The passed s_mask may be augmented by z_mask.
  */
 static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
-                          uint64_t z_mask, uint64_t s_mask)
+                          uint64_t z_mask, int64_t s_mask)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
+    int rep;
 
     /* Only single-output opcodes are supported here. */
     tcg_debug_assert(def->nb_oargs == 1);
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
      */
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
-        s_mask |= MAKE_64BIT_MASK(32, 32);
+        s_mask |= INT32_MIN;
     }
 
     if (z_mask == 0) {
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
 
     ti = ts_info(ts);
     ti->z_mask = z_mask;
-    ti->s_mask = s_mask | smask_from_zmask(z_mask);
136
+
137
+ /* Canonicalize s_mask and incorporate data from z_mask. */
138
+ rep = clz64(~s_mask);
139
+ rep = MAX(rep, clz64(z_mask));
140
+ rep = MAX(rep - 1, 0);
141
+ ti->s_mask = INT64_MIN >> rep;
142
+
143
return true;
144
}
145
146
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
147
148
ctx->z_mask = z_mask;
149
ctx->s_mask = s_mask;
150
- if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
151
+ if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
152
return true;
153
}
154
155
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
156
s_mask |= MAKE_64BIT_MASK(len, 64 - len);
157
ctx->s_mask = s_mask;
158
159
- if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
160
+ if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
161
return true;
162
}
163
164
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
165
ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
166
167
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
168
- ctx->s_mask = smask_from_smask(s_mask);
169
170
return fold_masks(ctx, op);
171
}
172
--
173
2.43.0
1
This can replace four other variables that are references
1
into the TranslationBlock structure.
2
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
3
---
7
include/tcg/tcg.h | 11 +++--------
4
tcg/optimize.c | 9 +++++----
8
accel/tcg/translate-all.c | 2 +-
5
1 file changed, 5 insertions(+), 4 deletions(-)
9
tcg/tcg-op.c | 14 +++++++-------
10
tcg/tcg.c | 14 +++-----------
11
4 files changed, 14 insertions(+), 27 deletions(-)
12
6
13
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg.h
9
--- a/tcg/optimize.c
16
+++ b/include/tcg/tcg.h
10
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
11
@@ -XXX,XX +XXX,XX @@ static void finish_ebb(OptContext *ctx)
18
int nb_indirects;
12
remove_mem_copy_all(ctx);
19
int nb_ops;
13
}
20
14
21
- /* goto_tb support */
15
-static void finish_folding(OptContext *ctx, TCGOp *op)
22
- tcg_insn_unit *code_buf;
16
+static bool finish_folding(OptContext *ctx, TCGOp *op)
23
- uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
24
- uintptr_t *tb_jmp_insn_offset; /* tb->jmp_target_arg if direct_jump */
25
- uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */
26
-
27
TCGRegSet reserved_regs;
28
- uint32_t tb_cflags; /* cflags of the current TB */
29
intptr_t current_frame_offset;
30
intptr_t frame_start;
31
intptr_t frame_end;
32
TCGTemp *frame_temp;
33
34
- tcg_insn_unit *code_ptr;
35
+ TranslationBlock *gen_tb; /* tb for which code is being generated */
36
+ tcg_insn_unit *code_buf; /* pointer for start of tb */
37
+ tcg_insn_unit *code_ptr; /* pointer for running end of tb */
38
39
#ifdef CONFIG_PROFILER
40
TCGProfile prof;
41
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/accel/tcg/translate-all.c
44
+++ b/accel/tcg/translate-all.c
45
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
46
tb->trace_vcpu_dstate = *cpu->trace_dstate;
47
tb_set_page_addr0(tb, phys_pc);
48
tb_set_page_addr1(tb, -1);
49
- tcg_ctx->tb_cflags = cflags;
50
+ tcg_ctx->gen_tb = tb;
51
tb_overflow:
52
53
#ifdef CONFIG_PROFILER
54
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/tcg/tcg-op.c
57
+++ b/tcg/tcg-op.c
58
@@ -XXX,XX +XXX,XX @@ void tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
59
60
void tcg_gen_mb(TCGBar mb_type)
61
{
17
{
62
- if (tcg_ctx->tb_cflags & CF_PARALLEL) {
18
const TCGOpDef *def = &tcg_op_defs[op->opc];
63
+ if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) {
19
int i, nb_oargs;
64
tcg_gen_op1(INDEX_op_mb, mb_type);
20
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
21
ts_info(ts)->z_mask = ctx->z_mask;
22
}
65
}
23
}
24
+ return true;
66
}
25
}
67
@@ -XXX,XX +XXX,XX @@ void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx)
26
68
void tcg_gen_goto_tb(unsigned idx)
27
/*
69
{
28
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
70
/* We tested CF_NO_GOTO_TB in translator_use_goto_tb. */
29
fold_xi_to_x(ctx, op, 0)) {
71
- tcg_debug_assert(!(tcg_ctx->tb_cflags & CF_NO_GOTO_TB));
30
return true;
72
+ tcg_debug_assert(!(tcg_ctx->gen_tb->cflags & CF_NO_GOTO_TB));
73
/* We only support two chained exits. */
74
tcg_debug_assert(idx <= TB_EXIT_IDXMAX);
75
#ifdef CONFIG_DEBUG_TCG
76
@@ -XXX,XX +XXX,XX @@ void tcg_gen_lookup_and_goto_ptr(void)
77
{
78
TCGv_ptr ptr;
79
80
- if (tcg_ctx->tb_cflags & CF_NO_GOTO_PTR) {
81
+ if (tcg_ctx->gen_tb->cflags & CF_NO_GOTO_PTR) {
82
tcg_gen_exit_tb(NULL, 0);
83
return;
84
}
31
}
85
@@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
32
- return false;
86
{
33
+ return finish_folding(ctx, op);
87
memop = tcg_canonicalize_memop(memop, 0, 0);
88
89
- if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
90
+ if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
91
TCGv_i32 t1 = tcg_temp_new_i32();
92
TCGv_i32 t2 = tcg_temp_new_i32();
93
94
@@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
95
{
96
memop = tcg_canonicalize_memop(memop, 1, 0);
97
98
- if (!(tcg_ctx->tb_cflags & CF_PARALLEL)) {
99
+ if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
100
TCGv_i64 t1 = tcg_temp_new_i64();
101
TCGv_i64 t2 = tcg_temp_new_i64();
102
103
@@ -XXX,XX +XXX,XX @@ static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \
104
void tcg_gen_atomic_##NAME##_i32 \
105
(TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \
106
{ \
107
- if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
108
+ if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \
109
do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \
110
} else { \
111
do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \
112
@@ -XXX,XX +XXX,XX @@ void tcg_gen_atomic_##NAME##_i32 \
113
void tcg_gen_atomic_##NAME##_i64 \
114
(TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \
115
{ \
116
- if (tcg_ctx->tb_cflags & CF_PARALLEL) { \
117
+ if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \
118
do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \
119
} else { \
120
do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \
121
diff --git a/tcg/tcg.c b/tcg/tcg.c
122
index XXXXXXX..XXXXXXX 100644
123
--- a/tcg/tcg.c
124
+++ b/tcg/tcg.c
125
@@ -XXX,XX +XXX,XX @@ static void set_jmp_reset_offset(TCGContext *s, int which)
126
* We will check for overflow at the end of the opcode loop in
127
* tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
128
*/
129
- s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
130
+ s->gen_tb->jmp_reset_offset[which] = tcg_current_code_size(s);
131
}
34
}
132
35
133
static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
36
/* We cannot as yet do_constant_folding with vectors. */
134
@@ -XXX,XX +XXX,XX @@ static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
37
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
135
* tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
38
fold_xi_to_x(ctx, op, 0)) {
136
*/
39
return true;
137
tcg_debug_assert(TCG_TARGET_HAS_direct_jump);
40
}
138
- s->tb_jmp_insn_offset[which] = tcg_current_code_size(s);
41
- return false;
139
+ s->gen_tb->jmp_target_arg[which] = tcg_current_code_size(s);
42
+ return finish_folding(ctx, op);
140
}
43
}
141
44
142
static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
45
static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
143
@@ -XXX,XX +XXX,XX @@ static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
46
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
144
* Return the read-execute version of the pointer, for the benefit
47
op->args[4] = arg_new_constant(ctx, bl);
145
* of any pc-relative addressing mode.
48
op->args[5] = arg_new_constant(ctx, bh);
146
*/
49
}
147
- return (uintptr_t)tcg_splitwx_to_rx(&s->tb_jmp_target_addr[which]);
50
- return false;
148
+ return (uintptr_t)tcg_splitwx_to_rx(s->gen_tb->jmp_target_arg + which);
51
+ return finish_folding(ctx, op);
149
}
52
}
150
53
151
/* Signal overflow, starting over with fewer guest insns. */
54
static bool fold_add2(OptContext *ctx, TCGOp *op)
152
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
153
/* Initialize goto_tb jump offsets. */
154
tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
155
tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
156
- tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
157
- if (TCG_TARGET_HAS_direct_jump) {
158
- tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
159
- tcg_ctx->tb_jmp_target_addr = NULL;
160
- } else {
161
- tcg_ctx->tb_jmp_insn_offset = NULL;
162
- tcg_ctx->tb_jmp_target_addr = tb->jmp_target_arg;
163
- }
164
165
tcg_reg_alloc_start(s);
166
167
--
55
--
168
2.34.1
56
2.43.0
169
170
1
The INDEX_op_exit_tb opcode needs no register allocation.
1
Split out a dedicated helper function for it.
2
Introduce ti_is_const, ti_const_val, ti_is_const_val.
3
2
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
4
---
8
tcg/tcg.c | 4 ++++
5
tcg/optimize.c | 20 +++++++++++++++++---
9
tcg/aarch64/tcg-target.c.inc | 22 ++++++++++--------
6
1 file changed, 17 insertions(+), 3 deletions(-)
10
tcg/arm/tcg-target.c.inc | 11 +++++----
11
tcg/i386/tcg-target.c.inc | 21 +++++++++--------
12
tcg/loongarch64/tcg-target.c.inc | 22 ++++++++++--------
13
tcg/mips/tcg-target.c.inc | 33 +++++++++++++--------------
14
tcg/ppc/tcg-target.c.inc | 11 +++++----
15
tcg/riscv/tcg-target.c.inc | 22 ++++++++++--------
16
tcg/s390x/tcg-target.c.inc | 23 ++++++++++---------
17
tcg/sparc64/tcg-target.c.inc | 39 +++++++++++++++++---------------
18
tcg/tci/tcg-target.c.inc | 10 ++++----
19
11 files changed, 121 insertions(+), 97 deletions(-)
20
7
21
diff --git a/tcg/tcg.c b/tcg/tcg.c
8
diff --git a/tcg/optimize.c b/tcg/optimize.c
22
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
23
--- a/tcg/tcg.c
10
--- a/tcg/optimize.c
24
+++ b/tcg/tcg.c
11
+++ b/tcg/optimize.c
25
@@ -XXX,XX +XXX,XX @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
12
@@ -XXX,XX +XXX,XX @@ static inline TempOptInfo *arg_info(TCGArg arg)
26
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
13
return ts_info(arg_temp(arg));
27
static void tcg_out_movi(TCGContext *s, TCGType type,
14
}
28
TCGReg ret, tcg_target_long arg);
15
29
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
16
+static inline bool ti_is_const(TempOptInfo *ti)
30
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
31
const TCGArg args[TCG_MAX_OP_ARGS],
32
const int const_args[TCG_MAX_OP_ARGS]);
33
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
34
case INDEX_op_call:
35
tcg_reg_alloc_call(s, op);
36
break;
37
+ case INDEX_op_exit_tb:
38
+ tcg_out_exit_tb(s, op->args[0]);
39
+ break;
40
case INDEX_op_dup2_vec:
41
if (tcg_reg_alloc_dup2(s, op)) {
42
break;
43
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
44
index XXXXXXX..XXXXXXX 100644
45
--- a/tcg/aarch64/tcg-target.c.inc
46
+++ b/tcg/aarch64/tcg-target.c.inc
47
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
48
49
static const tcg_insn_unit *tb_ret_addr;
50
51
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
52
+{
17
+{
53
+ /* Reuse the zeroing that exists for goto_ptr. */
18
+ return ti->is_const;
54
+ if (a0 == 0) {
55
+ tcg_out_goto_long(s, tcg_code_gen_epilogue);
56
+ } else {
57
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
58
+ tcg_out_goto_long(s, tb_ret_addr);
59
+ }
60
+}
19
+}
61
+
20
+
62
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
21
+static inline uint64_t ti_const_val(TempOptInfo *ti)
63
const TCGArg args[TCG_MAX_OP_ARGS],
64
const int const_args[TCG_MAX_OP_ARGS])
65
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
66
#define REG0(I) (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I])
67
68
switch (opc) {
69
- case INDEX_op_exit_tb:
70
- /* Reuse the zeroing that exists for goto_ptr. */
71
- if (a0 == 0) {
72
- tcg_out_goto_long(s, tcg_code_gen_epilogue);
73
- } else {
74
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
75
- tcg_out_goto_long(s, tb_ret_addr);
76
- }
77
- break;
78
-
79
case INDEX_op_goto_tb:
80
tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
81
/*
82
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
83
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
84
case INDEX_op_mov_i64:
85
case INDEX_op_call: /* Always emitted via tcg_out_call. */
86
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
87
default:
88
g_assert_not_reached();
89
}
90
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
91
index XXXXXXX..XXXXXXX 100644
92
--- a/tcg/arm/tcg-target.c.inc
93
+++ b/tcg/arm/tcg-target.c.inc
94
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
95
96
static void tcg_out_epilogue(TCGContext *s);
97
98
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
99
+{
22
+{
100
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg);
23
+ return ti->val;
101
+ tcg_out_epilogue(s);
102
+}
24
+}
103
+
25
+
104
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
26
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
105
const TCGArg args[TCG_MAX_OP_ARGS],
106
const int const_args[TCG_MAX_OP_ARGS])
107
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
108
int c;
109
110
switch (opc) {
111
- case INDEX_op_exit_tb:
112
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, args[0]);
113
- tcg_out_epilogue(s);
114
- break;
115
case INDEX_op_goto_tb:
116
{
117
/* Indirect jump method */
118
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
119
120
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
121
case INDEX_op_call: /* Always emitted via tcg_out_call. */
122
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
123
default:
124
tcg_abort();
125
}
126
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
127
index XXXXXXX..XXXXXXX 100644
128
--- a/tcg/i386/tcg-target.c.inc
129
+++ b/tcg/i386/tcg-target.c.inc
130
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
131
#endif
132
}
133
134
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
135
+{
27
+{
136
+ /* Reuse the zeroing that exists for goto_ptr. */
28
+ return ti_is_const(ti) && ti_const_val(ti) == val;
137
+ if (a0 == 0) {
138
+ tcg_out_jmp(s, tcg_code_gen_epilogue);
139
+ } else {
140
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
141
+ tcg_out_jmp(s, tb_ret_addr);
142
+ }
143
+}
29
+}
144
+
30
+
145
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
31
static inline bool ts_is_const(TCGTemp *ts)
146
const TCGArg args[TCG_MAX_OP_ARGS],
32
{
147
const int const_args[TCG_MAX_OP_ARGS])
33
- return ts_info(ts)->is_const;
148
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
34
+ return ti_is_const(ts_info(ts));
149
const_a2 = const_args[2];
150
151
switch (opc) {
152
- case INDEX_op_exit_tb:
153
- /* Reuse the zeroing that exists for goto_ptr. */
154
- if (a0 == 0) {
155
- tcg_out_jmp(s, tcg_code_gen_epilogue);
156
- } else {
157
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, a0);
158
- tcg_out_jmp(s, tb_ret_addr);
159
- }
160
- break;
161
case INDEX_op_goto_tb:
162
if (s->tb_jmp_insn_offset) {
163
/* direct jump method */
164
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
165
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
166
case INDEX_op_mov_i64:
167
case INDEX_op_call: /* Always emitted via tcg_out_call. */
168
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
169
default:
170
tcg_abort();
171
}
172
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
173
index XXXXXXX..XXXXXXX 100644
174
--- a/tcg/loongarch64/tcg-target.c.inc
175
+++ b/tcg/loongarch64/tcg-target.c.inc
176
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
177
178
static const tcg_insn_unit *tb_ret_addr;
179
180
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
181
+{
182
+ /* Reuse the zeroing that exists for goto_ptr. */
183
+ if (a0 == 0) {
184
+ tcg_out_call_int(s, tcg_code_gen_epilogue, true);
185
+ } else {
186
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
187
+ tcg_out_call_int(s, tb_ret_addr, true);
188
+ }
189
+}
190
+
191
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
192
const TCGArg args[TCG_MAX_OP_ARGS],
193
const int const_args[TCG_MAX_OP_ARGS])
194
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
195
int c2 = const_args[2];
196
197
switch (opc) {
198
- case INDEX_op_exit_tb:
199
- /* Reuse the zeroing that exists for goto_ptr. */
200
- if (a0 == 0) {
201
- tcg_out_call_int(s, tcg_code_gen_epilogue, true);
202
- } else {
203
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
204
- tcg_out_call_int(s, tb_ret_addr, true);
205
- }
206
- break;
207
-
208
case INDEX_op_goto_tb:
209
tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
210
/*
211
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
212
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
213
case INDEX_op_mov_i64:
214
case INDEX_op_call: /* Always emitted via tcg_out_call. */
215
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
216
default:
217
g_assert_not_reached();
218
}
219
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
220
index XXXXXXX..XXXXXXX 100644
221
--- a/tcg/mips/tcg-target.c.inc
222
+++ b/tcg/mips/tcg-target.c.inc
223
@@ -XXX,XX +XXX,XX @@ static void tcg_out_clz(TCGContext *s, MIPSInsn opcv2, MIPSInsn opcv6,
224
}
225
}
35
}
226
36
227
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
37
static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
228
+{
38
{
229
+ TCGReg b0 = TCG_REG_ZERO;
39
- TempOptInfo *ti = ts_info(ts);
230
+
40
- return ti->is_const && ti->val == val;
231
+ if (a0 & ~0xffff) {
41
+ return ti_is_const_val(ts_info(ts), val);
232
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
233
+ b0 = TCG_REG_V0;
234
+ }
235
+ if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
236
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)tb_ret_addr);
237
+ tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
238
+ }
239
+ tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
240
+}
241
+
242
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
243
const TCGArg args[TCG_MAX_OP_ARGS],
244
const int const_args[TCG_MAX_OP_ARGS])
245
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
246
c2 = const_args[2];
247
248
switch (opc) {
249
- case INDEX_op_exit_tb:
250
- {
251
- TCGReg b0 = TCG_REG_ZERO;
252
-
253
- a0 = (intptr_t)a0;
254
- if (a0 & ~0xffff) {
255
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, a0 & ~0xffff);
256
- b0 = TCG_REG_V0;
257
- }
258
- if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) {
259
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
260
- (uintptr_t)tb_ret_addr);
261
- tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
262
- }
263
- tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
264
- }
265
- break;
266
case INDEX_op_goto_tb:
267
/* indirect jump method */
268
tcg_debug_assert(s->tb_jmp_insn_offset == 0);
269
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
270
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
271
case INDEX_op_mov_i64:
272
case INDEX_op_call: /* Always emitted via tcg_out_call. */
273
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
274
default:
275
tcg_abort();
276
}
277
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
278
index XXXXXXX..XXXXXXX 100644
279
--- a/tcg/ppc/tcg-target.c.inc
280
+++ b/tcg/ppc/tcg-target.c.inc
281
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
282
tcg_out32(s, BCLR | BO_ALWAYS);
283
}
42
}
284
43
285
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
44
static inline bool arg_is_const(TCGArg arg)
286
+{
287
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, arg);
288
+ tcg_out_b(s, 0, tcg_code_gen_epilogue);
289
+}
290
+
291
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
292
const TCGArg args[TCG_MAX_OP_ARGS],
293
const int const_args[TCG_MAX_OP_ARGS])
294
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
295
TCGArg a0, a1, a2;
296
297
switch (opc) {
298
- case INDEX_op_exit_tb:
299
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, args[0]);
300
- tcg_out_b(s, 0, tcg_code_gen_epilogue);
301
- break;
302
case INDEX_op_goto_tb:
303
if (s->tb_jmp_insn_offset) {
304
/* Direct jump. */
305
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
306
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
307
case INDEX_op_mov_i64:
308
case INDEX_op_call: /* Always emitted via tcg_out_call. */
309
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
310
default:
311
tcg_abort();
312
}
313
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
314
index XXXXXXX..XXXXXXX 100644
315
--- a/tcg/riscv/tcg-target.c.inc
316
+++ b/tcg/riscv/tcg-target.c.inc
317
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
318
319
static const tcg_insn_unit *tb_ret_addr;
320
321
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
322
+{
323
+ /* Reuse the zeroing that exists for goto_ptr. */
324
+ if (a0 == 0) {
325
+ tcg_out_call_int(s, tcg_code_gen_epilogue, true);
326
+ } else {
327
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
328
+ tcg_out_call_int(s, tb_ret_addr, true);
329
+ }
330
+}
331
+
332
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
333
const TCGArg args[TCG_MAX_OP_ARGS],
334
const int const_args[TCG_MAX_OP_ARGS])
335
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
336
int c2 = const_args[2];
337
338
switch (opc) {
339
- case INDEX_op_exit_tb:
340
- /* Reuse the zeroing that exists for goto_ptr. */
341
- if (a0 == 0) {
342
- tcg_out_call_int(s, tcg_code_gen_epilogue, true);
343
- } else {
344
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
345
- tcg_out_call_int(s, tb_ret_addr, true);
346
- }
347
- break;
348
-
349
case INDEX_op_goto_tb:
350
assert(s->tb_jmp_insn_offset == 0);
351
/* indirect jump method */
352
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
353
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
354
case INDEX_op_mov_i64:
355
case INDEX_op_call: /* Always emitted via tcg_out_call. */
356
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
357
default:
358
g_assert_not_reached();
359
}
360
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
361
index XXXXXXX..XXXXXXX 100644
362
--- a/tcg/s390x/tcg-target.c.inc
363
+++ b/tcg/s390x/tcg-target.c.inc
364
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
365
#endif
366
}
367
368
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
369
+{
370
+ /* Reuse the zeroing that exists for goto_ptr. */
371
+ if (a0 == 0) {
372
+ tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
373
+ } else {
374
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
375
+ tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
376
+ }
377
+}
378
+
379
# define OP_32_64(x) \
380
case glue(glue(INDEX_op_,x),_i32): \
381
case glue(glue(INDEX_op_,x),_i64)
382
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
383
TCGArg a0, a1, a2;
384
385
switch (opc) {
386
- case INDEX_op_exit_tb:
387
- /* Reuse the zeroing that exists for goto_ptr. */
388
- a0 = args[0];
389
- if (a0 == 0) {
390
- tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
391
- } else {
392
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
393
- tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
394
- }
395
- break;
396
-
397
case INDEX_op_goto_tb:
398
a0 = args[0];
399
/*
400
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
401
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
402
case INDEX_op_mov_i64:
403
case INDEX_op_call: /* Always emitted via tcg_out_call. */
404
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
405
default:
406
tcg_abort();
407
}
408
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
409
index XXXXXXX..XXXXXXX 100644
410
--- a/tcg/sparc64/tcg-target.c.inc
411
+++ b/tcg/sparc64/tcg-target.c.inc
412
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
413
#endif /* CONFIG_SOFTMMU */
414
}
415
416
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
417
+{
418
+ if (check_fit_ptr(a0, 13)) {
419
+ tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
420
+ tcg_out_movi_imm13(s, TCG_REG_O0, a0);
421
+ return;
422
+ } else if (USE_REG_TB) {
423
+ intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
424
+ if (check_fit_ptr(tb_diff, 13)) {
425
+ tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
426
+ /* Note that TCG_REG_TB has been unwound to O1. */
427
+ tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
428
+ return;
429
+ }
430
+ }
431
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
432
+ tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
433
+ tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
434
+}
435
+
436
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
437
const TCGArg args[TCG_MAX_OP_ARGS],
438
const int const_args[TCG_MAX_OP_ARGS])
439
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
440
c2 = const_args[2];
441
442
switch (opc) {
443
- case INDEX_op_exit_tb:
444
- if (check_fit_ptr(a0, 13)) {
445
- tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
446
- tcg_out_movi_imm13(s, TCG_REG_O0, a0);
447
- break;
448
- } else if (USE_REG_TB) {
449
- intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
450
- if (check_fit_ptr(tb_diff, 13)) {
451
- tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
452
- /* Note that TCG_REG_TB has been unwound to O1. */
453
- tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
454
- break;
455
- }
456
- }
457
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
458
- tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
459
- tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
460
- break;
461
case INDEX_op_goto_tb:
462
if (s->tb_jmp_insn_offset) {
463
/* direct jump method */
464
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
465
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
466
case INDEX_op_mov_i64:
467
case INDEX_op_call: /* Always emitted via tcg_out_call. */
468
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
469
default:
470
tcg_abort();
471
}
472
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
473
index XXXXXXX..XXXXXXX 100644
474
--- a/tcg/tci/tcg-target.c.inc
475
+++ b/tcg/tci/tcg-target.c.inc
476
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
477
# define CASE_64(x)
478
#endif
479
480
+static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
481
+{
482
+ tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
483
+}
484
+
485
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
486
const TCGArg args[TCG_MAX_OP_ARGS],
487
const int const_args[TCG_MAX_OP_ARGS])
488
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
489
TCGOpcode exts;
490
491
switch (opc) {
492
- case INDEX_op_exit_tb:
493
- tcg_out_op_p(s, opc, (void *)args[0]);
494
- break;
495
-
496
case INDEX_op_goto_tb:
497
tcg_debug_assert(s->tb_jmp_insn_offset == 0);
498
/* indirect jump method. */
499
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
500
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
501
case INDEX_op_mov_i64:
502
case INDEX_op_call: /* Always emitted via tcg_out_call. */
503
+ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
504
default:
505
tcg_abort();
506
}
507
--
45
--
508
2.34.1
46
2.43.0
509
510
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Sink the mask computation below the fold_affected_mask early exit.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 30 ++++++++++++++++--------------
8
1 file changed, 16 insertions(+), 14 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_add2(OptContext *ctx, TCGOp *op)
15
16
static bool fold_and(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1, z2;
19
+ uint64_t z1, z2, z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2_commutative(ctx, op) ||
23
fold_xi_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
- z2 = arg_info(op->args[2])->z_mask;
30
- ctx->z_mask = z1 & z2;
31
-
32
- /*
33
- * Sign repetitions are perforce all identical, whether they are 1 or 0.
34
- * Bitwise operations preserve the relative quantity of the repetitions.
35
- */
36
- ctx->s_mask = arg_info(op->args[1])->s_mask
37
- & arg_info(op->args[2])->s_mask;
38
+ t1 = arg_info(op->args[1]);
39
+ t2 = arg_info(op->args[2]);
40
+ z1 = t1->z_mask;
41
+ z2 = t2->z_mask;
42
43
/*
44
* Known-zeros does not imply known-ones. Therefore unless
45
* arg2 is constant, we can't infer affected bits from it.
46
*/
47
- if (arg_is_const(op->args[2]) &&
48
- fold_affected_mask(ctx, op, z1 & ~z2)) {
49
+ if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
50
return true;
51
}
52
53
- return fold_masks(ctx, op);
54
+ z_mask = z1 & z2;
55
+
56
+ /*
57
+ * Sign repetitions are perforce all identical, whether they are 1 or 0.
58
+ * Bitwise operations preserve the relative quantity of the repetitions.
59
+ */
60
+ s_mask = t1->s_mask & t2->s_mask;
61
+
62
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
63
}
64
65
static bool fold_andc(OptContext *ctx, TCGOp *op)
66
--
67
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Avoid double inversion of the value of the second constant operand.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 21 +++++++++++----------
8
1 file changed, 11 insertions(+), 10 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
15
16
static bool fold_andc(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t z1;
19
+ uint64_t z_mask, s_mask;
20
+ TempOptInfo *t1, *t2;
21
22
if (fold_const2(ctx, op) ||
23
fold_xx_to_i(ctx, op, 0) ||
24
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
25
return true;
26
}
27
28
- z1 = arg_info(op->args[1])->z_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ t2 = arg_info(op->args[2]);
31
+ z_mask = t1->z_mask;
32
33
/*
34
* Known-zeros does not imply known-ones. Therefore unless
35
* arg2 is constant, we can't infer anything from it.
36
*/
37
- if (arg_is_const(op->args[2])) {
38
- uint64_t z2 = ~arg_info(op->args[2])->z_mask;
39
- if (fold_affected_mask(ctx, op, z1 & ~z2)) {
40
+ if (ti_is_const(t2)) {
41
+ uint64_t v2 = ti_const_val(t2);
42
+ if (fold_affected_mask(ctx, op, z_mask & v2)) {
43
return true;
44
}
45
- z1 &= z2;
46
+ z_mask &= ~v2;
47
}
48
- ctx->z_mask = z1;
49
50
- ctx->s_mask = arg_info(op->args[1])->s_mask
51
- & arg_info(op->args[2])->s_mask;
52
- return fold_masks(ctx, op);
53
+ s_mask = t1->s_mask & t2->s_mask;
54
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
55
}
56
57
static bool fold_brcond(OptContext *ctx, TCGOp *op)
58
--
59
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Always set s_mask along the BSWAP_OS path, since the result is
3
being explicitly sign-extended.
1
4
5
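As a small standalone sketch of why setting s_mask unconditionally on the
OS path is safe (illustrative only; it assumes a GCC/Clang host and uses
__builtin_bswap16 rather than QEMU's helpers):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t in = 0x1280;
    /* 16-bit byteswap with the result sign-extended, i.e. the OS case. */
    int64_t out = (int16_t)__builtin_bswap16((uint16_t)in);

    /* out = 0xffffffffffff8012: bits 15..63 all equal the msb, so the
       new-format s_mask 0xffffffffffff8000 holds whatever z_mask says. */
    printf("out = %016" PRIx64 "\n", (uint64_t)out);
    return 0;
}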
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 21 ++++++++++-----------
9
1 file changed, 10 insertions(+), 11 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
16
static bool fold_bswap(OptContext *ctx, TCGOp *op)
17
{
18
uint64_t z_mask, s_mask, sign;
19
+ TempOptInfo *t1 = arg_info(op->args[1]);
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t = arg_info(op->args[1])->val;
23
-
24
- t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
25
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
26
+ if (ti_is_const(t1)) {
27
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
28
+ do_constant_folding(op->opc, ctx->type,
29
+ ti_const_val(t1),
30
+ op->args[2]));
31
}
32
33
- z_mask = arg_info(op->args[1])->z_mask;
34
-
35
+ z_mask = t1->z_mask;
36
switch (op->opc) {
37
case INDEX_op_bswap16_i32:
38
case INDEX_op_bswap16_i64:
39
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
40
/* If the sign bit may be 1, force all the bits above to 1. */
41
if (z_mask & sign) {
42
z_mask |= sign;
43
- s_mask = sign << 1;
44
}
45
+ /* The value and therefore s_mask is explicitly sign-extended. */
46
+ s_mask = sign;
47
break;
48
default:
49
/* The high bits are undefined: force all bits above the sign to 1. */
50
z_mask |= sign << 1;
51
break;
52
}
53
- ctx->z_mask = z_mask;
54
- ctx->s_mask = s_mask;
55
56
- return fold_masks(ctx, op);
57
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
58
}
59
60
static bool fold_call(OptContext *ctx, TCGOp *op)
61
--
62
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Compute s_mask from the union of the maximum count and the
3
op2 fallback for op1 being zero.
1
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/optimize.c | 15 ++++++++++-----
9
1 file changed, 10 insertions(+), 5 deletions(-)
10
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/optimize.c
14
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
16
17
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
18
{
19
- uint64_t z_mask;
20
+ uint64_t z_mask, s_mask;
21
+ TempOptInfo *t1 = arg_info(op->args[1]);
22
+ TempOptInfo *t2 = arg_info(op->args[2]);
23
24
- if (arg_is_const(op->args[1])) {
25
- uint64_t t = arg_info(op->args[1])->val;
26
+ if (ti_is_const(t1)) {
27
+ uint64_t t = ti_const_val(t1);
28
29
if (t != 0) {
30
t = do_constant_folding(op->opc, ctx->type, t, 0);
31
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
32
default:
33
g_assert_not_reached();
34
}
35
- ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
36
- return false;
37
+ s_mask = ~z_mask;
38
+ z_mask |= t2->z_mask;
39
+ s_mask &= t2->s_mask;
40
+
41
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
42
}
43
44
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
New patch
1
Add fold_masks_z as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
15
return true;
16
}
17
18
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, z_mask, 0);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
27
28
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask;
31
+
32
if (fold_const1(ctx, op)) {
33
return true;
34
}
35
36
switch (ctx->type) {
37
case TCG_TYPE_I32:
38
- ctx->z_mask = 32 | 31;
39
+ z_mask = 32 | 31;
40
break;
41
case TCG_TYPE_I64:
42
- ctx->z_mask = 64 | 63;
43
+ z_mask = 64 | 63;
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_z(ctx, op, z_mask);
50
}
51
52
static bool fold_deposit(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
When we fold to and, use fold_and.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 35 +++++++++++++++++------------------
8
1 file changed, 17 insertions(+), 18 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
15
16
static bool fold_deposit(OptContext *ctx, TCGOp *op)
17
{
18
+ TempOptInfo *t1 = arg_info(op->args[1]);
19
+ TempOptInfo *t2 = arg_info(op->args[2]);
20
+ int ofs = op->args[3];
21
+ int len = op->args[4];
22
TCGOpcode and_opc;
23
+ uint64_t z_mask;
24
25
- if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
26
- uint64_t t1 = arg_info(op->args[1])->val;
27
- uint64_t t2 = arg_info(op->args[2])->val;
28
-
29
- t1 = deposit64(t1, op->args[3], op->args[4], t2);
30
- return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
31
+ if (ti_is_const(t1) && ti_is_const(t2)) {
32
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
33
+ deposit64(ti_const_val(t1), ofs, len,
34
+ ti_const_val(t2)));
35
}
36
37
switch (ctx->type) {
38
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
39
}
40
41
/* Inserting a value into zero at offset 0. */
42
- if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
43
- uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
44
+ if (ti_is_const_val(t1, 0) && ofs == 0) {
45
+ uint64_t mask = MAKE_64BIT_MASK(0, len);
46
47
op->opc = and_opc;
48
op->args[1] = op->args[2];
49
op->args[2] = arg_new_constant(ctx, mask);
50
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
51
- return false;
52
+ return fold_and(ctx, op);
53
}
54
55
/* Inserting zero into a value. */
56
- if (arg_is_const_val(op->args[2], 0)) {
57
- uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
58
+ if (ti_is_const_val(t2, 0)) {
59
+ uint64_t mask = deposit64(-1, ofs, len, 0);
60
61
op->opc = and_opc;
62
op->args[2] = arg_new_constant(ctx, mask);
63
- ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
64
- return false;
65
+ return fold_and(ctx, op);
66
}
67
68
- ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
69
- op->args[3], op->args[4],
70
- arg_info(op->args[2])->z_mask);
71
- return false;
72
+ z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
73
+ return fold_masks_z(ctx, op, z_mask);
74
}
75
76
static bool fold_divide(OptContext *ctx, TCGOp *op)
77
--
78
2.43.0
New patch
1
The input which overlaps the sign bit of the output can
2
have its input s_mask propagated to the output s_mask.
1
3
4
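A standalone sketch of the two resulting cases, with made-up masks in the
new all-bits-matching-msb representation (t1, t2, ofs and len here are
illustrative values, not taken from the patch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* t1 known sign-extended from bit 7, t2 from bit 12. */
    uint64_t t1_s_mask = ~0ull << 7;    /* 0xffffffffffffff80 */
    uint64_t t2_s_mask = ~0ull << 12;   /* 0xfffffffffffff000 */

    /* Deposit at ofs=48, len=16: the field covers the output msb, so
       t2's s_mask shifted into place still holds (bits 60..63). */
    uint64_t s_top = t2_s_mask << 48;                   /* 0xf000000000000000 */

    /* Deposit at ofs=8, len=16: the top comes from t1, so only t1's
       known-sign bits strictly above the field (bits 24..63) remain. */
    uint64_t s_mid = t1_s_mask & ~(~0ull >> (64 - 24)); /* 0xffffffffff000000 */

    printf("top %016" PRIx64 "  middle %016" PRIx64 "\n", s_top, s_mid);
    return 0;
}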
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 14 ++++++++++++--
8
1 file changed, 12 insertions(+), 2 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
15
TempOptInfo *t2 = arg_info(op->args[2]);
16
int ofs = op->args[3];
17
int len = op->args[4];
18
+ int width;
19
TCGOpcode and_opc;
20
- uint64_t z_mask;
21
+ uint64_t z_mask, s_mask;
22
23
if (ti_is_const(t1) && ti_is_const(t2)) {
24
return tcg_opt_gen_movi(ctx, op, op->args[0],
25
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
26
switch (ctx->type) {
27
case TCG_TYPE_I32:
28
and_opc = INDEX_op_and_i32;
29
+ width = 32;
30
break;
31
case TCG_TYPE_I64:
32
and_opc = INDEX_op_and_i64;
33
+ width = 64;
34
break;
35
default:
36
g_assert_not_reached();
37
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
38
return fold_and(ctx, op);
39
}
40
41
+ /* The s_mask from the top portion of the deposit is still valid. */
42
+ if (ofs + len == width) {
43
+ s_mask = t2->s_mask << ofs;
44
+ } else {
45
+ s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
46
+ }
47
+
48
z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
49
- return fold_masks_z(ctx, op, z_mask);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_divide(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 4 ++--
5
1 file changed, 2 insertions(+), 2 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
12
t = dup_const(TCGOP_VECE(op), t);
13
return tcg_opt_gen_movi(ctx, op, op->args[0], t);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_dup2(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
21
op->opc = INDEX_op_dup_vec;
22
TCGOP_VECE(op) = MO_32;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
26
}
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
--
30
2.43.0
New patch
1
Add fold_masks_s as a trivial wrapper around fold_masks_zs.
2
Avoid the use of the OptContext slots.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 13 ++++++++++---
8
1 file changed, 10 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
15
return fold_masks_zs(ctx, op, z_mask, 0);
16
}
17
18
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
19
+{
20
+ return fold_masks_zs(ctx, op, -1, s_mask);
21
+}
22
+
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
24
{
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t s_mask;
31
+
32
if (fold_const2_commutative(ctx, op) ||
33
fold_xi_to_x(ctx, op, -1) ||
34
fold_xi_to_not(ctx, op, 0)) {
35
return true;
36
}
37
38
- ctx->s_mask = arg_info(op->args[1])->s_mask
39
- & arg_info(op->args[2])->s_mask;
40
- return false;
41
+ s_mask = arg_info(op->args[1])->s_mask
42
+ & arg_info(op->args[2])->s_mask;
43
+ return fold_masks_s(ctx, op, s_mask);
44
}
45
46
static bool fold_extract(OptContext *ctx, TCGOp *op)
47
--
48
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 15 ++++++---------
7
1 file changed, 6 insertions(+), 9 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
14
static bool fold_extract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask_old, z_mask;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = extract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ extract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask_old = arg_info(op->args[1])->z_mask;
33
+ z_mask_old = t1->z_mask;
34
z_mask = extract64(z_mask_old, pos, len);
35
if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
36
return true;
37
}
38
- ctx->z_mask = z_mask;
39
40
- return fold_masks(ctx, op);
41
+ return fold_masks_z(ctx, op, z_mask);
42
}
43
44
static bool fold_extract2(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
12
}
13
return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_exts(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Explicitly sign-extend z_mask instead of doing that manually.
1
3
4
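For instance, for ext8s (a standalone sketch, not the patch's code; it
assumes the usual wrapping conversion to int8_t, which the patch also
relies on):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t z_mask = 0xb5;     /* possible-ones mask of the 8-bit input */

    /* Old: truncate, then widen by hand when the sign bit may be set. */
    uint64_t old_z = (uint8_t)z_mask;
    if (old_z & 0x80) {
        old_z |= (uint64_t)INT8_MIN;        /* 0xffffffffffffff80 */
    }

    /* New: a single signed cast performs the same sign extension. */
    uint64_t new_z = (uint64_t)(int8_t)z_mask;

    printf("old %016" PRIx64 "  new %016" PRIx64 "\n", old_z, new_z);
    return 0;
}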
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 29 ++++++++++++-----------------
8
1 file changed, 12 insertions(+), 17 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
15
16
static bool fold_exts(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t s_mask_old, s_mask, z_mask, sign;
19
+ uint64_t s_mask_old, s_mask, z_mask;
20
bool type_change = false;
21
+ TempOptInfo *t1;
22
23
if (fold_const1(ctx, op)) {
24
return true;
25
}
26
27
- z_mask = arg_info(op->args[1])->z_mask;
28
- s_mask = arg_info(op->args[1])->s_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ z_mask = t1->z_mask;
31
+ s_mask = t1->s_mask;
32
s_mask_old = s_mask;
33
34
switch (op->opc) {
35
CASE_OP_32_64(ext8s):
36
- sign = INT8_MIN;
37
- z_mask = (uint8_t)z_mask;
38
+ s_mask |= INT8_MIN;
39
+ z_mask = (int8_t)z_mask;
40
break;
41
CASE_OP_32_64(ext16s):
42
- sign = INT16_MIN;
43
- z_mask = (uint16_t)z_mask;
44
+ s_mask |= INT16_MIN;
45
+ z_mask = (int16_t)z_mask;
46
break;
47
case INDEX_op_ext_i32_i64:
48
type_change = true;
49
QEMU_FALLTHROUGH;
50
case INDEX_op_ext32s_i64:
51
- sign = INT32_MIN;
52
- z_mask = (uint32_t)z_mask;
53
+ s_mask |= INT32_MIN;
54
+ z_mask = (int32_t)z_mask;
55
break;
56
default:
57
g_assert_not_reached();
58
}
59
60
- if (z_mask & sign) {
61
- z_mask |= sign;
62
- }
63
- s_mask |= sign << 1;
64
-
65
- ctx->z_mask = z_mask;
66
- ctx->s_mask = s_mask;
67
if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
68
return true;
69
}
70
71
- return fold_masks(ctx, op);
72
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
73
}
74
75
static bool fold_extu(OptContext *ctx, TCGOp *op)
76
--
77
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 4 ++--
7
1 file changed, 2 insertions(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
14
g_assert_not_reached();
15
}
16
17
- ctx->z_mask = z_mask;
18
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
19
return true;
20
}
21
- return fold_masks(ctx, op);
22
+
23
+ return fold_masks_z(ctx, op, z_mask);
24
}
25
26
static bool fold_mb(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 19 +++++++++++--------
7
1 file changed, 11 insertions(+), 8 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
14
15
static bool fold_movcond(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *tt, *ft;
19
int i;
20
21
/* If true and false values are the same, eliminate the cmp. */
22
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
23
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
24
}
25
26
- ctx->z_mask = arg_info(op->args[3])->z_mask
27
- | arg_info(op->args[4])->z_mask;
28
- ctx->s_mask = arg_info(op->args[3])->s_mask
29
- & arg_info(op->args[4])->s_mask;
30
+ tt = arg_info(op->args[3]);
31
+ ft = arg_info(op->args[4]);
32
+ z_mask = tt->z_mask | ft->z_mask;
33
+ s_mask = tt->s_mask & ft->s_mask;
34
35
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
36
- uint64_t tv = arg_info(op->args[3])->val;
37
- uint64_t fv = arg_info(op->args[4])->val;
38
+ if (ti_is_const(tt) && ti_is_const(ft)) {
39
+ uint64_t tv = ti_const_val(tt);
40
+ uint64_t fv = ti_const_val(ft);
41
TCGOpcode opc, negopc = 0;
42
TCGCond cond = op->args[5];
43
44
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
45
}
46
}
47
}
48
- return false;
49
+
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_mul(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 6 +++---
5
1 file changed, 3 insertions(+), 3 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
21
fold_xi_to_i(ctx, op, 0)) {
22
return true;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
26
}
27
28
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
29
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
30
tcg_opt_gen_movi(ctx, op2, rh, h);
31
return true;
32
}
33
- return false;
34
+ return finish_folding(ctx, op);
35
}
36
37
static bool fold_nand(OptContext *ctx, TCGOp *op)
38
--
39
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nand(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, -1)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 9 ++-------
7
1 file changed, 2 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
14
{
15
/* Set to 1 all bits to the left of the rightmost. */
16
uint64_t z_mask = arg_info(op->args[1])->z_mask;
17
- ctx->z_mask = -(z_mask & -z_mask);
18
+ z_mask = -(z_mask & -z_mask);
19
20
- /*
21
- * Because of fold_sub_to_neg, we want to always return true,
22
- * via finish_folding.
23
- */
24
- finish_folding(ctx, op);
25
- return true;
26
+ return fold_masks_z(ctx, op, z_mask);
27
}
28
29
static bool fold_neg(OptContext *ctx, TCGOp *op)
30
--
31
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nor(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, 0)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_not(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 7 +------
7
1 file changed, 1 insertion(+), 6 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
if (fold_const1(ctx, op)) {
15
return true;
16
}
17
-
18
- ctx->s_mask = arg_info(op->args[1])->s_mask;
19
-
20
- /* Because of fold_to_not, we want to always return true, via finish. */
21
- finish_folding(ctx, op);
22
- return true;
23
+ return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
24
}
25
26
static bool fold_or(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 ++++++++-----
7
1 file changed, 8 insertions(+), 5 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
15
static bool fold_or(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *t1, *t2;
19
+
20
if (fold_const2_commutative(ctx, op) ||
21
fold_xi_to_x(ctx, op, 0) ||
22
fold_xx_to_x(ctx, op)) {
23
return true;
24
}
25
26
- ctx->z_mask = arg_info(op->args[1])->z_mask
27
- | arg_info(op->args[2])->z_mask;
28
- ctx->s_mask = arg_info(op->args[1])->s_mask
29
- & arg_info(op->args[2])->s_mask;
30
- return fold_masks(ctx, op);
31
+ t1 = arg_info(op->args[1]);
32
+ t2 = arg_info(op->args[2]);
33
+ z_mask = t1->z_mask | t2->z_mask;
34
+ s_mask = t1->s_mask & t2->s_mask;
35
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
36
}
37
38
static bool fold_orc(OptContext *ctx, TCGOp *op)
39
--
40
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
14
15
static bool fold_orc(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2(ctx, op) ||
20
fold_xx_to_i(ctx, op, -1) ||
21
fold_xi_to_x(ctx, op, -1) ||
22
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
23
return true;
24
}
25
26
- ctx->s_mask = arg_info(op->args[1])->s_mask
27
- & arg_info(op->args[2])->s_mask;
28
- return false;
29
+ s_mask = arg_info(op->args[1])->s_mask
30
+ & arg_info(op->args[2])->s_mask;
31
+ return fold_masks_s(ctx, op, s_mask);
32
}
33
34
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
35
--
36
2.43.0
diff view generated by jsdifflib
1
Similar to the existing set_jmp_reset_offset. Move any assert for
1
Avoid the use of the OptContext slots.
2
TCG_TARGET_HAS_direct_jump into the new function (which now cannot
3
be a build-time assert). Will be unused if TCG_TARGET_HAS_direct_jump is
4
constant 0, but we can't test for constant in the preprocessor,
5
so just mark it G_GNUC_UNUSED.
6
2
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Be careful not to call fold_masks_zs when the memory operation
4
is wide enough to require multiple outputs, so split into two
5
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.
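A stand-alone sketch of why the split is needed, with made-up names standing in for the real helpers: the mask helper records one (z_mask, s_mask) pair for exactly one output, so it must not be applied to a load that produces two registers.

    /* Stand-in sketch; types and names are illustrative, not QEMU's. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct { uint64_t z_mask, s_mask; } OutInfo;
    typedef struct { void *prev_mb; OutInfo out0; } OptContext;

    /* Describes a single output value only. */
    static bool record_single_output(OptContext *ctx, uint64_t z, uint64_t s)
    {
        ctx->out0.z_mask = z;
        ctx->out0.s_mask = s;
        return true;
    }

    /* Narrow load: one output; masks follow from the MemOp width. */
    bool fold_ld_1reg_like(OptContext *ctx, uint64_t z, uint64_t s)
    {
        ctx->prev_mb = NULL;            /* guest access ends mb optimization */
        return record_single_output(ctx, z, s);
    }

    /* 128-bit load: two outputs, so no single mask pair applies; just
     * finish without recording mask data. */
    bool fold_ld_2reg_like(OptContext *ctx)
    {
        ctx->prev_mb = NULL;
        return true;
    }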
6
7
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
9
---
10
tcg/tcg.c | 10 ++++++++++
10
tcg/optimize.c | 26 +++++++++++++++++++++-----
11
tcg/aarch64/tcg-target.c.inc | 3 +--
11
1 file changed, 21 insertions(+), 5 deletions(-)
12
tcg/i386/tcg-target.c.inc | 3 +--
13
tcg/loongarch64/tcg-target.c.inc | 3 +--
14
tcg/ppc/tcg-target.c.inc | 7 +++----
15
tcg/s390x/tcg-target.c.inc | 2 +-
16
tcg/sparc64/tcg-target.c.inc | 5 ++---
17
7 files changed, 19 insertions(+), 14 deletions(-)
18
12
19
diff --git a/tcg/tcg.c b/tcg/tcg.c
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
20
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
21
--- a/tcg/tcg.c
15
--- a/tcg/optimize.c
22
+++ b/tcg/tcg.c
16
+++ b/tcg/optimize.c
23
@@ -XXX,XX +XXX,XX @@ static void set_jmp_reset_offset(TCGContext *s, int which)
17
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
24
s->tb_jmp_reset_offset[which] = tcg_current_code_size(s);
18
return fold_masks_s(ctx, op, s_mask);
25
}
19
}
26
20
27
+static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
21
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
28
+{
22
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
29
+ /*
23
{
30
+ * We will check for overflow at the end of the opcode loop in
24
const TCGOpDef *def = &tcg_op_defs[op->opc];
31
+ * tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
25
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
32
+ */
26
MemOp mop = get_memop(oi);
33
+ tcg_debug_assert(TCG_TARGET_HAS_direct_jump);
27
int width = 8 * memop_size(mop);
34
+ s->tb_jmp_insn_offset[which] = tcg_current_code_size(s);
28
+ uint64_t z_mask = -1, s_mask = 0;
29
30
if (width < 64) {
31
if (mop & MO_SIGN) {
32
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
33
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
34
} else {
35
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
36
+ z_mask = MAKE_64BIT_MASK(0, width);
37
}
38
}
39
40
/* Opcodes that touch guest memory stop the mb optimization. */
41
ctx->prev_mb = NULL;
42
- return false;
43
+
44
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
35
+}
45
+}
36
+
46
+
37
/* Signal overflow, starting over with fewer guest insns. */
47
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
38
static G_NORETURN
48
+{
39
void tcg_raise_tb_overflow(TCGContext *s)
49
+ /* Opcodes that touch guest memory stop the mb optimization. */
40
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
50
+ ctx->prev_mb = NULL;
41
index XXXXXXX..XXXXXXX 100644
51
+ return finish_folding(ctx, op);
42
--- a/tcg/aarch64/tcg-target.c.inc
52
}
43
+++ b/tcg/aarch64/tcg-target.c.inc
53
44
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
54
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
45
55
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
46
switch (opc) {
47
case INDEX_op_goto_tb:
48
- qemu_build_assert(TCG_TARGET_HAS_direct_jump);
49
/*
50
* Ensure that ADRP+ADD are 8-byte aligned so that an atomic
51
* write can be used to patch the target address.
52
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
53
if ((uintptr_t)s->code_ptr & 7) {
54
tcg_out32(s, NOP);
55
}
56
- s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
57
+ set_jmp_insn_offset(s, a0);
58
/*
59
* actual branch destination will be patched by
60
* tb_target_set_jmp_target later
61
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
62
index XXXXXXX..XXXXXXX 100644
63
--- a/tcg/i386/tcg-target.c.inc
64
+++ b/tcg/i386/tcg-target.c.inc
65
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
66
67
switch (opc) {
68
case INDEX_op_goto_tb:
69
- qemu_build_assert(TCG_TARGET_HAS_direct_jump);
70
{
71
/*
72
* Jump displacement must be aligned for atomic patching;
73
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
74
tcg_out_nopn(s, gap - 1);
75
}
76
tcg_out8(s, OPC_JMP_long); /* jmp im */
77
- s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
78
+ set_jmp_insn_offset(s, a0);
79
tcg_out32(s, 0);
80
}
81
set_jmp_reset_offset(s, a0);
82
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
83
index XXXXXXX..XXXXXXX 100644
84
--- a/tcg/loongarch64/tcg-target.c.inc
85
+++ b/tcg/loongarch64/tcg-target.c.inc
86
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
87
88
switch (opc) {
89
case INDEX_op_goto_tb:
90
- qemu_build_assert(TCG_TARGET_HAS_direct_jump);
91
/*
92
* Ensure that patch area is 8-byte aligned so that an
93
* atomic write can be used to patch the target address.
94
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
95
if ((uintptr_t)s->code_ptr & 7) {
96
tcg_out_nop(s);
97
}
98
- s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
99
+ set_jmp_insn_offset(s, a0);
100
/*
101
* actual branch destination will be patched by
102
* tb_target_set_jmp_target later
103
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
104
index XXXXXXX..XXXXXXX 100644
105
--- a/tcg/ppc/tcg-target.c.inc
106
+++ b/tcg/ppc/tcg-target.c.inc
107
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
108
109
switch (opc) {
110
case INDEX_op_goto_tb:
111
- qemu_build_assert(TCG_TARGET_HAS_direct_jump);
112
/* Direct jump. */
113
if (TCG_TARGET_REG_BITS == 64) {
114
/* Ensure the next insns are 8 or 16-byte aligned. */
115
while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
116
tcg_out32(s, NOP);
117
}
118
- s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
119
+ set_jmp_insn_offset(s, args[0]);
120
tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
121
tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
122
} else {
123
- s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
124
+ set_jmp_insn_offset(s, args[0]);
125
tcg_out32(s, B);
126
- s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
127
+ set_jmp_reset_offset(s, args[0]);
128
break;
56
break;
129
}
57
case INDEX_op_qemu_ld_a32_i32:
130
tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
58
case INDEX_op_qemu_ld_a64_i32:
131
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
59
+ done = fold_qemu_ld_1reg(&ctx, op);
132
index XXXXXXX..XXXXXXX 100644
60
+ break;
133
--- a/tcg/s390x/tcg-target.c.inc
61
case INDEX_op_qemu_ld_a32_i64:
134
+++ b/tcg/s390x/tcg-target.c.inc
62
case INDEX_op_qemu_ld_a64_i64:
135
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
63
+ if (TCG_TARGET_REG_BITS == 64) {
136
tcg_out16(s, NOP);
64
+ done = fold_qemu_ld_1reg(&ctx, op);
137
}
65
+ break;
138
tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
66
+ }
139
- s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
67
+ QEMU_FALLTHROUGH;
140
+ set_jmp_insn_offset(s, a0);
68
case INDEX_op_qemu_ld_a32_i128:
141
s->code_ptr += 2;
69
case INDEX_op_qemu_ld_a64_i128:
142
set_jmp_reset_offset(s, a0);
70
- done = fold_qemu_ld(&ctx, op);
143
break;
71
+ done = fold_qemu_ld_2reg(&ctx, op);
144
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
72
break;
145
index XXXXXXX..XXXXXXX 100644
73
case INDEX_op_qemu_st8_a32_i32:
146
--- a/tcg/sparc64/tcg-target.c.inc
74
case INDEX_op_qemu_st8_a64_i32:
147
+++ b/tcg/sparc64/tcg-target.c.inc
148
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
149
150
switch (opc) {
151
case INDEX_op_goto_tb:
152
- qemu_build_assert(TCG_TARGET_HAS_direct_jump);
153
/* Direct jump. */
154
if (USE_REG_TB) {
155
/* make sure the patch is 8-byte aligned. */
156
if ((intptr_t)s->code_ptr & 4) {
157
tcg_out_nop(s);
158
}
159
- s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
160
+ set_jmp_insn_offset(s, a0);
161
tcg_out_sethi(s, TCG_REG_T1, 0);
162
tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
163
tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
164
tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
165
} else {
166
- s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
167
+ set_jmp_insn_offset(s, a0);
168
tcg_out32(s, CALL);
169
tcg_out_nop(s);
170
}
171
--
75
--
172
2.34.1
76
2.43.0
173
174
1
Now that tcg can handle direct and indirect goto_tb
1
Stores have no output operands, and so need no further work.
2
simultaneously, we can optimistically leave space for
3
a direct branch and fall back to loading the pointer
4
from the TB for an indirect branch.
5
2
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
5
---
9
tcg/arm/tcg-target.c.inc | 52 ++++++++++++++++++++++++++++------------
6
tcg/optimize.c | 11 +++++------
10
1 file changed, 37 insertions(+), 15 deletions(-)
7
1 file changed, 5 insertions(+), 6 deletions(-)
11
8
12
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/arm/tcg-target.c.inc
11
--- a/tcg/optimize.c
15
+++ b/tcg/arm/tcg-target.c.inc
12
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ typedef enum {
13
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
17
ARITH_BIC = 0xe << 21,
18
ARITH_MVN = 0xf << 21,
19
20
+ INSN_B = 0x0a000000,
21
+
22
INSN_CLZ = 0x016f0f10,
23
INSN_RBIT = 0x06ff0f30,
24
25
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
26
27
static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
28
{
14
{
29
- tcg_out32(s, (cond << 28) | 0x0a000000 |
15
/* Opcodes that touch guest memory stop the mb optimization. */
30
+ tcg_out32(s, (cond << 28) | INSN_B |
16
ctx->prev_mb = NULL;
31
(((offset - 8) >> 2) & 0x00ffffff));
17
- return false;
18
+ return true;
32
}
19
}
33
20
34
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
21
static bool fold_remainder(OptContext *ctx, TCGOp *op)
35
22
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
36
static void tcg_out_goto_tb(TCGContext *s, int which)
23
37
{
24
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
38
- /* Indirect jump method */
25
remove_mem_copy_all(ctx);
39
- intptr_t ptr, dif, dil;
26
- return false;
40
- TCGReg base = TCG_REG_PC;
27
+ return true;
41
+ uintptr_t i_addr;
42
+ intptr_t i_disp;
43
44
- ptr = get_jmp_target_addr(s, which);
45
- dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
46
- dil = sextract32(dif, 0, 12);
47
- if (dif != dil) {
48
+ /* Direct branch will be patched by tb_target_set_jmp_target. */
49
+ set_jmp_insn_offset(s, which);
50
+ tcg_out32(s, INSN_NOP);
51
+
52
+ /* When branch is out of range, fall through to indirect. */
53
+ i_addr = get_jmp_target_addr(s, which);
54
+ i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8;
55
+ tcg_debug_assert(i_disp < 0);
56
+ if (i_disp >= -0xfff) {
57
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp);
58
+ } else {
59
/*
60
* The TB is close, but outside the 12 bits addressable by
61
* the load. We can extend this to 20 bits with a sub of a
62
- * shifted immediate from pc. In the vastly unlikely event
63
- * the code requires more than 1MB, we'll use 2 insns and
64
- * be no worse off.
65
+ * shifted immediate from pc.
66
*/
67
- base = TCG_REG_R0;
68
- tcg_out_movi32(s, COND_AL, base, ptr - dil);
69
+ int h = -i_disp;
70
+ int l = h & 0xfff;
71
+
72
+ h = encode_imm_nofail(h - l);
73
+ tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h);
74
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, l);
75
}
28
}
76
- tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
29
77
set_jmp_reset_offset(s, which);
30
switch (op->opc) {
31
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
32
g_assert_not_reached();
33
}
34
remove_mem_copy_in(ctx, ofs, ofs + lm1);
35
- return false;
36
+ return true;
78
}
37
}
79
38
80
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
39
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
81
uintptr_t jmp_rx, uintptr_t jmp_rw)
40
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
82
{
41
TCGType type;
83
- /* Always indirect, nothing to do */
42
84
+ uintptr_t addr = tb->jmp_target_addr[n];
43
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
85
+ ptrdiff_t offset = addr - (jmp_rx + 8);
44
- fold_tcg_st(ctx, op);
86
+ tcg_insn_unit insn;
45
- return false;
87
+
46
+ return fold_tcg_st(ctx, op);
88
+ /* Either directly branch, or fall through to indirect branch. */
47
}
89
+ if (offset == sextract64(offset, 0, 26)) {
48
90
+ /* B <addr> */
49
src = arg_temp(op->args[0]);
91
+ insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2);
50
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
92
+ } else {
51
last = ofs + tcg_type_size(type) - 1;
93
+ insn = INSN_NOP;
52
remove_mem_copy_in(ctx, ofs, last);
94
+ }
53
record_mem_copy(ctx, type, src, ofs, last);
95
+
54
- return false;
96
+ qatomic_set((uint32_t *)jmp_rw, insn);
55
+ return true;
97
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
98
}
56
}
99
57
100
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
58
static bool fold_xor(OptContext *ctx, TCGOp *op)
101
--
59
--
102
2.34.1
60
2.43.0
103
104
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
12
fold_xx_to_i(ctx, op, 0)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
20
--
21
2.43.0
New patch
1
Change return from bool to int; distinguish between
2
complete folding, simplification, and no change.
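A sketch of the convention and of how the caller reads it; the enum names below are illustrative only, the real code returns the bare values.

    /* Illustrative names for the three-way result; not QEMU code. */
    enum {
        ZMASK_UNCHANGED  =  0,   /* nothing done, try the next transform */
        ZMASK_SIMPLIFIED = -1,   /* op rewritten in place, skip that transform */
        ZMASK_FINISHED   =  1,   /* op fully folded, caller returns true */
    };

    /*
     * Caller shape, as in fold_setcond():
     *
     *     int i = fold_setcond_zmask(ctx, op, false);
     *     if (i > 0) {
     *         return true;
     *     }
     *     if (i == 0) {
     *         fold_setcond_tst_pow2(ctx, op, false);
     *     }
     */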
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 22 ++++++++++++++--------
8
1 file changed, 14 insertions(+), 8 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
15
return finish_folding(ctx, op);
16
}
17
18
-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
19
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
20
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
21
{
22
uint64_t a_zmask, b_val;
23
TCGCond cond;
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
25
op->opc = xor_opc;
26
op->args[2] = arg_new_constant(ctx, 1);
27
}
28
- return false;
29
+ return -1;
30
}
31
}
32
-
33
- return false;
34
+ return 0;
35
}
36
37
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
39
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
40
}
41
42
- if (fold_setcond_zmask(ctx, op, false)) {
43
+ i = fold_setcond_zmask(ctx, op, false);
44
+ if (i > 0) {
45
return true;
46
}
47
- fold_setcond_tst_pow2(ctx, op, false);
48
+ if (i == 0) {
49
+ fold_setcond_tst_pow2(ctx, op, false);
50
+ }
51
52
ctx->z_mask = 1;
53
return false;
54
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
55
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
56
}
57
58
- if (fold_setcond_zmask(ctx, op, true)) {
59
+ i = fold_setcond_zmask(ctx, op, true);
60
+ if (i > 0) {
61
return true;
62
}
63
- fold_setcond_tst_pow2(ctx, op, true);
64
+ if (i == 0) {
65
+ fold_setcond_tst_pow2(ctx, op, true);
66
+ }
67
68
/* Value is {0,-1} so all bits are repetitions of the sign. */
69
ctx->s_mask = -1;
70
--
71
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
14
fold_setcond_tst_pow2(ctx, op, false);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
}
21
22
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
14
}
15
16
/* Value is {0,-1} so all bits are repetitions of the sign. */
17
- ctx->s_mask = -1;
18
- return false;
19
+ return fold_masks_s(ctx, op, -1);
20
}
21
22
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
14
return fold_setcond(ctx, op);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
21
do_setcond_const:
22
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
23
--
24
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
13
op->args[3] = tcg_swap_cond(op->args[3]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
12
if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
13
op->args[5] = tcg_invert_cond(op->args[5]);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_sextract(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 24 +++++++++---------------
7
1 file changed, 9 insertions(+), 15 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
14
static bool fold_sextract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask, s_mask, s_mask_old;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = sextract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ sextract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask = arg_info(op->args[1])->z_mask;
33
- z_mask = sextract64(z_mask, pos, len);
34
- ctx->z_mask = z_mask;
35
-
36
- s_mask_old = arg_info(op->args[1])->s_mask;
37
- s_mask = sextract64(s_mask_old, pos, len);
38
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
39
- ctx->s_mask = s_mask;
40
+ s_mask_old = t1->s_mask;
41
+ s_mask = s_mask_old >> pos;
42
+ s_mask |= -1ull << (len - 1);
43
44
if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
45
return true;
46
}
47
48
- return fold_masks(ctx, op);
49
+ z_mask = sextract64(t1->z_mask, pos, len);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_shift(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
1
This is always true for sparc64, so this is dead since 3a5f6805c7ca.
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/sparc64/tcg-target.c.inc | 62 ++++++++++++------------------------
6
tcg/optimize.c | 27 ++++++++++++++-------------
8
1 file changed, 21 insertions(+), 41 deletions(-)
7
1 file changed, 14 insertions(+), 13 deletions(-)
9
8
10
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/sparc64/tcg-target.c.inc
11
--- a/tcg/optimize.c
13
+++ b/tcg/sparc64/tcg-target.c.inc
12
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
15
#endif
14
static bool fold_shift(OptContext *ctx, TCGOp *op)
16
15
{
17
#define TCG_REG_TB TCG_REG_I1
16
uint64_t s_mask, z_mask, sign;
18
-#define USE_REG_TB (sizeof(void *) > 4)
17
+ TempOptInfo *t1, *t2;
19
18
20
static const int tcg_target_reg_alloc_order[] = {
19
if (fold_const2(ctx, op) ||
21
TCG_REG_L0,
20
fold_ix_to_i(ctx, op, 0) ||
22
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
21
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
22
return true;
23
}
23
}
24
24
25
/* A 13-bit constant relative to the TB. */
25
- s_mask = arg_info(op->args[1])->s_mask;
26
- if (!in_prologue && USE_REG_TB) {
26
- z_mask = arg_info(op->args[1])->z_mask;
27
+ if (!in_prologue) {
27
+ t1 = arg_info(op->args[1]);
28
test = tcg_tbrel_diff(s, (void *)arg);
28
+ t2 = arg_info(op->args[2]);
29
if (check_fit_ptr(test, 13)) {
29
+ s_mask = t1->s_mask;
30
tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
30
+ z_mask = t1->z_mask;
31
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
31
32
- if (arg_is_const(op->args[2])) {
33
- int sh = arg_info(op->args[2])->val;
34
-
35
- ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
36
+ if (ti_is_const(t2)) {
37
+ int sh = ti_const_val(t2);
38
39
+ z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
40
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
41
42
- return fold_masks(ctx, op);
43
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
32
}
44
}
33
45
34
/* Use the constant pool, if possible. */
46
switch (op->opc) {
35
- if (!in_prologue && USE_REG_TB) {
47
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
36
+ if (!in_prologue) {
48
* Arithmetic right shift will not reduce the number of
37
new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
49
* input sign repetitions.
38
tcg_tbrel_diff(s, NULL));
50
*/
39
tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
51
- ctx->s_mask = s_mask;
40
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
52
- break;
41
#endif
53
+ return fold_masks_s(ctx, op, s_mask);
42
54
CASE_OP_32_64(shr):
43
/* We choose TCG_REG_TB such that no move is required. */
55
/*
44
- if (USE_REG_TB) {
56
* If the sign bit is known zero, then logical right shift
45
- QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
57
- * will not reduced the number of input sign repetitions.
46
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
58
+ * will not reduce the number of input sign repetitions.
47
- }
59
*/
48
+ QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
60
- sign = (s_mask & -s_mask) >> 1;
49
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
61
+ sign = -s_mask;
50
62
if (sign && !(z_mask & sign)) {
51
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
63
- ctx->s_mask = s_mask;
52
/* delay slot */
64
+ return fold_masks_s(ctx, op, s_mask);
53
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
65
}
54
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
66
break;
55
tcg_out_movi_imm13(s, TCG_REG_O0, a0);
67
default:
56
return;
68
break;
57
- } else if (USE_REG_TB) {
58
+ } else {
59
intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
60
if (check_fit_ptr(tb_diff, 13)) {
61
tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
62
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
63
64
static void tcg_out_goto_tb(TCGContext *s, int which)
65
{
66
+ int c;
67
+
68
/* Direct jump. */
69
- if (USE_REG_TB) {
70
- /* make sure the patch is 8-byte aligned. */
71
- if ((intptr_t)s->code_ptr & 4) {
72
- tcg_out_nop(s);
73
- }
74
- set_jmp_insn_offset(s, which);
75
- tcg_out_sethi(s, TCG_REG_T1, 0);
76
- tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
77
- tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
78
- tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
79
- } else {
80
- set_jmp_insn_offset(s, which);
81
- tcg_out32(s, CALL);
82
+ /* make sure the patch is 8-byte aligned. */
83
+ if ((intptr_t)s->code_ptr & 4) {
84
tcg_out_nop(s);
85
}
69
}
86
+ set_jmp_insn_offset(s, which);
70
87
+ tcg_out_sethi(s, TCG_REG_T1, 0);
71
- return false;
88
+ tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
72
+ return finish_folding(ctx, op);
89
+ tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
90
+ tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
91
set_jmp_reset_offset(s, which);
92
93
/*
94
* For the unlinked path of goto_tb, we need to reset TCG_REG_TB
95
* to the beginning of this TB.
96
*/
97
- if (USE_REG_TB) {
98
- int c = -tcg_current_code_size(s);
99
- if (check_fit_i32(c, 13)) {
100
- tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
101
- } else {
102
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
103
- tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
104
- }
105
+ c = -tcg_current_code_size(s);
106
+ if (check_fit_i32(c, 13)) {
107
+ tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
108
+ } else {
109
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
110
+ tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
111
}
112
}
73
}
113
74
114
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
75
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
115
switch (opc) {
116
case INDEX_op_goto_ptr:
117
tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
118
- if (USE_REG_TB) {
119
- tcg_out_mov_delay(s, TCG_REG_TB, a0);
120
- } else {
121
- tcg_out_nop(s);
122
- }
123
+ tcg_out_mov_delay(s, TCG_REG_TB, a0);
124
break;
125
case INDEX_op_br:
126
tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
127
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
128
tcg_debug_assert(tb_disp == (int32_t)tb_disp);
129
tcg_debug_assert(br_disp == (int32_t)br_disp);
130
131
- if (!USE_REG_TB) {
132
- qatomic_set((uint32_t *)jmp_rw,
133
-         deposit32(CALL, 0, 30, br_disp >> 2));
134
- flush_idcache_range(jmp_rx, jmp_rw, 4);
135
- return;
136
- }
137
-
138
/* This does not exercise the range of the branch, but we do
139
still need to be able to load the new value of TCG_REG_TB.
140
But this does still happen quite often. */
141
--
76
--
142
2.34.1
77
2.43.0
143
144
1
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
1
Merge the two conditions, sign != 0 && !(z_mask & sign),
2
by testing ~z_mask & sign. If sign == 0, the bitwise and
3
will produce false.
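Sign here is -s_mask, which is assumed to be either zero or a single bit; under that assumption the two tests agree. A throwaway stand-alone check of that claim (not QEMU code):

    /* Verify that "sign != 0 && !(z_mask & sign)" and "(~z_mask & sign) != 0"
     * agree whenever sign is zero or a single bit. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        for (int b = -1; b < 64; b++) {
            uint64_t sign = (b < 0) ? 0 : (UINT64_C(1) << b);
            for (uint64_t z_mask = 0; z_mask < 1024; z_mask++) {
                bool old_test = sign != 0 && !(z_mask & sign);
                bool new_test = (~z_mask & sign) != 0;
                assert(old_test == new_test);
            }
        }
        printf("old and new conditions agree\n");
        return 0;
    }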
4
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
7
---
4
tcg/i386/tcg-target.c.inc | 14 +++++---------
8
tcg/optimize.c | 5 ++---
5
1 file changed, 5 insertions(+), 9 deletions(-)
9
1 file changed, 2 insertions(+), 3 deletions(-)
6
10
7
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/i386/tcg-target.c.inc
13
--- a/tcg/optimize.c
10
+++ b/tcg/i386/tcg-target.c.inc
14
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
15
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
12
16
13
switch (opc) {
17
static bool fold_shift(OptContext *ctx, TCGOp *op)
14
case INDEX_op_goto_tb:
18
{
15
- if (s->tb_jmp_insn_offset) {
19
- uint64_t s_mask, z_mask, sign;
16
- /* direct jump method */
20
+ uint64_t s_mask, z_mask;
17
- int gap;
21
TempOptInfo *t1, *t2;
18
- /* jump displacement must be aligned for atomic patching;
22
19
+ qemu_build_assert(TCG_TARGET_HAS_direct_jump);
23
if (fold_const2(ctx, op) ||
20
+ {
24
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
21
+ /*
25
* If the sign bit is known zero, then logical right shift
22
+ * Jump displacement must be aligned for atomic patching;
26
* will not reduce the number of input sign repetitions.
23
* see if we need to add extra nops before jump
27
*/
24
*/
28
- sign = -s_mask;
25
- gap = QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) - s->code_ptr;
29
- if (sign && !(z_mask & sign)) {
26
+ int gap = QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) - s->code_ptr;
30
+ if (~z_mask & -s_mask) {
27
if (gap != 1) {
31
return fold_masks_s(ctx, op, s_mask);
28
tcg_out_nopn(s, gap - 1);
29
}
30
tcg_out8(s, OPC_JMP_long); /* jmp im */
31
s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
32
tcg_out32(s, 0);
33
- } else {
34
- /* indirect jump method */
35
- tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
36
- (intptr_t)(s->tb_jmp_target_addr + a0));
37
}
32
}
38
set_jmp_reset_offset(s, a0);
39
break;
33
break;
40
--
34
--
41
2.34.1
35
2.43.0
42
43
New patch
1
Duplicate fold_sub_vec into fold_sub instead of calling it,
2
now that fold_sub_vec always returns true.
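A sketch of the call-shape problem with stand-in functions (not the real ones): once the vector variant finishes the op itself, the integer variant can no longer chain through it and still apply its own rewrite afterwards, so the common checks are repeated.

    /* Stand-in sketch, not the real tcg/optimize.c code. */
    #include <stdbool.h>

    static bool common_simplifications(void) { return false; }  /* stand-in */
    static bool finish_folding_like(void)    { return true;  }  /* stand-in */

    bool fold_sub_vec_like(void)
    {
        if (common_simplifications()) {
            return true;
        }
        return finish_folding_like();      /* now always returns true */
    }

    bool fold_sub_like(void)
    {
        if (common_simplifications()) {    /* duplicated rather than calling
                                              fold_sub_vec_like(), which would
                                              finish the op too early */
            return true;
        }
        /* integer-only rewrite, e.g. sub x,const -> add x,-const, goes here */
        return finish_folding_like();
    }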
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 9 ++++++---
8
1 file changed, 6 insertions(+), 3 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
15
fold_sub_to_neg(ctx, op)) {
16
return true;
17
}
18
- return false;
19
+ return finish_folding(ctx, op);
20
}
21
22
static bool fold_sub(OptContext *ctx, TCGOp *op)
23
{
24
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
25
+ if (fold_const2(ctx, op) ||
26
+ fold_xx_to_i(ctx, op, 0) ||
27
+ fold_xi_to_x(ctx, op, 0) ||
28
+ fold_sub_to_neg(ctx, op)) {
29
return true;
30
}
31
32
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
33
? INDEX_op_add_i32 : INDEX_op_add_i64);
34
op->args[2] = arg_new_constant(ctx, -val);
35
}
36
- return false;
37
+ return finish_folding(ctx, op);
38
}
39
40
static bool fold_sub2(OptContext *ctx, TCGOp *op)
41
--
42
2.43.0
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 16 +++++++++-------
7
1 file changed, 9 insertions(+), 7 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask = -1, s_mask = 0;
18
+
19
/* We can't do any folding with a load, but we can record bits. */
20
switch (op->opc) {
21
CASE_OP_32_64(ld8s):
22
- ctx->s_mask = MAKE_64BIT_MASK(8, 56);
23
+ s_mask = INT8_MIN;
24
break;
25
CASE_OP_32_64(ld8u):
26
- ctx->z_mask = MAKE_64BIT_MASK(0, 8);
27
+ z_mask = MAKE_64BIT_MASK(0, 8);
28
break;
29
CASE_OP_32_64(ld16s):
30
- ctx->s_mask = MAKE_64BIT_MASK(16, 48);
31
+ s_mask = INT16_MIN;
32
break;
33
CASE_OP_32_64(ld16u):
34
- ctx->z_mask = MAKE_64BIT_MASK(0, 16);
35
+ z_mask = MAKE_64BIT_MASK(0, 16);
36
break;
37
case INDEX_op_ld32s_i64:
38
- ctx->s_mask = MAKE_64BIT_MASK(32, 32);
39
+ s_mask = INT32_MIN;
40
break;
41
case INDEX_op_ld32u_i64:
42
- ctx->z_mask = MAKE_64BIT_MASK(0, 32);
43
+ z_mask = MAKE_64BIT_MASK(0, 32);
44
break;
45
default:
46
g_assert_not_reached();
47
}
48
- return false;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
51
52
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
12
TCGType type;
13
14
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
type = ctx->type;
20
--
21
2.43.0
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Remove fold_masks as the function becomes unused.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 18 ++++++++----------
8
1 file changed, 8 insertions(+), 10 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
15
return fold_masks_zs(ctx, op, -1, s_mask);
16
}
17
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
19
-{
20
- return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
21
-}
22
-
23
/*
24
* An "affected" mask bit is 0 if and only if the result is identical
25
* to the first input. Thus if the entire mask is 0, the operation
26
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
27
28
static bool fold_xor(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t z_mask, s_mask;
31
+ TempOptInfo *t1, *t2;
32
+
33
if (fold_const2_commutative(ctx, op) ||
34
fold_xx_to_i(ctx, op, 0) ||
35
fold_xi_to_x(ctx, op, 0) ||
36
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
37
return true;
38
}
39
40
- ctx->z_mask = arg_info(op->args[1])->z_mask
41
- | arg_info(op->args[2])->z_mask;
42
- ctx->s_mask = arg_info(op->args[1])->s_mask
43
- & arg_info(op->args[2])->s_mask;
44
- return fold_masks(ctx, op);
45
+ t1 = arg_info(op->args[1]);
46
+ t2 = arg_info(op->args[2]);
47
+ z_mask = t1->z_mask | t2->z_mask;
48
+ s_mask = t1->s_mask & t2->s_mask;
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
50
}
51
52
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
53
--
54
2.43.0
1
Now that tcg can handle direct and indirect goto_tb simultaneously,
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
we can optimistically leave space for a direct branch and fall back
3
to loading the pointer from the TB for an indirect branch.
4
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
3
---
8
tcg/riscv/tcg-target.c.inc | 19 +++++++++++++++++--
4
tcg/optimize.c | 2 +-
9
1 file changed, 17 insertions(+), 2 deletions(-)
5
1 file changed, 1 insertion(+), 1 deletion(-)
10
6
11
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
12
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/riscv/tcg-target.c.inc
9
--- a/tcg/optimize.c
14
+++ b/tcg/riscv/tcg-target.c.inc
10
+++ b/tcg/optimize.c
15
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
11
@@ -XXX,XX +XXX,XX @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
16
12
return fold_orc(ctx, op);
17
static void tcg_out_goto_tb(TCGContext *s, int which)
13
}
18
{
14
}
19
- /* indirect jump method */
15
- return false;
20
+ /* Direct branch will be patched by tb_target_set_jmp_target. */
16
+ return finish_folding(ctx, op);
21
+ set_jmp_insn_offset(s, which);
22
+ tcg_out32(s, OPC_JAL);
23
+
24
+ /* When branch is out of range, fall through to indirect. */
25
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
26
get_jmp_target_addr(s, which));
27
tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
28
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
29
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
30
uintptr_t jmp_rx, uintptr_t jmp_rw)
31
{
32
- /* Always indirect, nothing to do */
33
+ uintptr_t addr = tb->jmp_target_addr[n];
34
+ ptrdiff_t offset = addr - jmp_rx;
35
+ tcg_insn_unit insn;
36
+
37
+ /* Either directly branch, or fall through to indirect branch. */
38
+ if (offset == sextreg(offset, 0, 20)) {
39
+ insn = encode_uj(OPC_JAL, TCG_REG_ZERO, offset);
40
+ } else {
41
+ insn = OPC_NOP;
42
+ }
43
+ qatomic_set((uint32_t *)jmp_rw, insn);
44
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
45
}
17
}
46
18
47
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
19
/* Propagate constants and copies, fold constant expressions. */
48
--
20
--
49
2.34.1
21
2.43.0
50
51
New patch
1
All non-default cases now finish folding within each function.
2
Do the same with the default case and assert it is done after.
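A sketch of the resulting dispatcher shape, with stand-in names: the default arm now finishes the op itself and the loop asserts that every path did.

    /* Stand-in sketch, not the real tcg_optimize() loop. */
    #include <assert.h>
    #include <stdbool.h>

    static bool finish_folding_like(void) { return true; }   /* stand-in */

    void optimize_one_op(int opc)
    {
        bool done = false;

        switch (opc) {
        /* each recognised opcode dispatches to a fold_*() that now
         * returns true itself */
        default:
            done = finish_folding_like();
            break;
        }
        assert(done);   /* no op may fall through unfinished */
    }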
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 6 ++----
8
1 file changed, 2 insertions(+), 4 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
15
done = true;
16
break;
17
default:
18
+ done = finish_folding(&ctx, op);
19
break;
20
}
21
-
22
- if (!done) {
23
- finish_folding(&ctx, op);
24
- }
25
+ tcg_debug_assert(done);
26
}
27
}
28
--
29
2.43.0
New patch
1
All mask setting is now done with parameters via fold_masks_*.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 -------------
7
1 file changed, 13 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
14
QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
15
16
/* In flight values from optimization. */
17
- uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
18
- uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
19
TCGType type;
20
} OptContext;
21
22
@@ -XXX,XX +XXX,XX @@ static bool finish_folding(OptContext *ctx, TCGOp *op)
23
for (i = 0; i < nb_oargs; i++) {
24
TCGTemp *ts = arg_temp(op->args[i]);
25
reset_ts(ctx, ts);
26
- /*
27
- * Save the corresponding known-zero/sign bits mask for the
28
- * first output argument (only one supported so far).
29
- */
30
- if (i == 0) {
31
- ts_info(ts)->z_mask = ctx->z_mask;
32
- }
33
}
34
return true;
35
}
36
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
37
ctx.type = TCG_TYPE_I32;
38
}
39
40
- /* Assume all bits affected, no bits known zero, no sign reps. */
41
- ctx.z_mask = -1;
42
- ctx.s_mask = 0;
43
-
44
/*
45
* Process each opcode.
46
* Sorted alphabetically by opcode as much as possible.
47
--
48
2.43.0
New patch
1
All instances of s_mask have been converted to the new
2
representation. We can now re-enable usage.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 4 ++--
8
1 file changed, 2 insertions(+), 2 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
15
g_assert_not_reached();
16
}
17
18
- if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
19
+ if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
20
return true;
21
}
22
23
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
24
s_mask = s_mask_old >> pos;
25
s_mask |= -1ull << (len - 1);
26
27
- if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
28
+ if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
29
return true;
30
}
31
32
--
33
2.43.0
1
The old sparc64 implementation may replace two insns, which leaves
1
The big comment just above says functions should be sorted.
2
a race condition in which a thread could be stopped at a PC in the
2
Add forward declarations as needed.
3
middle of the sequence and, when restarted, would not see the complete
4
address computation and would branch to nowhere.
5
3
6
The new implementation replaces only one insn, swapping between a
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
7
direct branch and a direct call. The TCG_REG_TB register is loaded
8
from tb->jmp_target_addr[] in the delay slot.
9
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
6
---
13
tcg/sparc64/tcg-target.c.inc | 87 +++++++++++++++---------------------
7
tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
14
1 file changed, 37 insertions(+), 50 deletions(-)
8
1 file changed, 59 insertions(+), 55 deletions(-)
15
9
16
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/sparc64/tcg-target.c.inc
12
--- a/tcg/optimize.c
19
+++ b/tcg/sparc64/tcg-target.c.inc
13
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
21
15
* 3) those that produce information about the result value.
22
static void tcg_out_goto_tb(TCGContext *s, int which)
16
*/
17
18
+static bool fold_or(OptContext *ctx, TCGOp *op);
19
+static bool fold_orc(OptContext *ctx, TCGOp *op);
20
+static bool fold_xor(OptContext *ctx, TCGOp *op);
21
+
22
static bool fold_add(OptContext *ctx, TCGOp *op)
23
{
23
{
24
- int c;
24
if (fold_const2_commutative(ctx, op) ||
25
+ ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));
25
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
26
26
return fold_masks_zs(ctx, op, z_mask, s_mask);
27
- /* Direct jump. */
28
- /* make sure the patch is 8-byte aligned. */
29
- if ((intptr_t)s->code_ptr & 4) {
30
- tcg_out_nop(s);
31
- }
32
+ /* Direct branch will be patched by tb_target_set_jmp_target. */
33
set_jmp_insn_offset(s, which);
34
- tcg_out_sethi(s, TCG_REG_T1, 0);
35
- tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
36
- tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
37
- tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
38
+ tcg_out32(s, CALL);
39
+ /* delay slot */
40
+ tcg_debug_assert(check_fit_ptr(off, 13));
41
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
42
set_jmp_reset_offset(s, which);
43
44
/*
45
* For the unlinked path of goto_tb, we need to reset TCG_REG_TB
46
* to the beginning of this TB.
47
*/
48
- c = -tcg_current_code_size(s);
49
- if (check_fit_i32(c, 13)) {
50
- tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
51
+ off = -tcg_current_code_size(s);
52
+ if (check_fit_i32(off, 13)) {
53
+ tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
54
} else {
55
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
56
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
57
tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
58
}
59
}
27
}
60
28
61
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
29
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
62
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
63
+{
30
+{
64
+ uintptr_t addr = tb->jmp_target_addr[n];
31
+ /* If true and false values are the same, eliminate the cmp. */
65
+ intptr_t br_disp = (intptr_t)(addr - jmp_rx) >> 2;
32
+ if (args_are_copies(op->args[2], op->args[3])) {
66
+ tcg_insn_unit insn;
33
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
67
+
68
+ br_disp >>= 2;
69
+ if (check_fit_ptr(br_disp, 19)) {
70
+ /* ba,pt %icc, addr */
71
+ insn = deposit32(INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
72
+ | BPCC_ICC | BPCC_PT, 0, 19, br_disp);
73
+ } else if (check_fit_ptr(br_disp, 22)) {
74
+ /* ba addr */
75
+ insn = deposit32(INSN_OP(0) | INSN_OP2(2) | INSN_COND(COND_A),
76
+ 0, 22, br_disp);
77
+ } else {
78
+ /* The code_gen_buffer can't be larger than 2GB. */
79
+ tcg_debug_assert(check_fit_ptr(br_disp, 30));
80
+ /* call addr */
81
+ insn = deposit32(CALL, 0, 30, br_disp);
82
+ }
34
+ }
83
+
35
+
84
+ qatomic_set((uint32_t *)jmp_rw, insn);
36
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
85
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
37
+ uint64_t tv = arg_info(op->args[2])->val;
38
+ uint64_t fv = arg_info(op->args[3])->val;
39
+
40
+ if (tv == -1 && fv == 0) {
41
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
42
+ }
43
+ if (tv == 0 && fv == -1) {
44
+ if (TCG_TARGET_HAS_not_vec) {
45
+ op->opc = INDEX_op_not_vec;
46
+ return fold_not(ctx, op);
47
+ } else {
48
+ op->opc = INDEX_op_xor_vec;
49
+ op->args[2] = arg_new_constant(ctx, -1);
50
+ return fold_xor(ctx, op);
51
+ }
52
+ }
53
+ }
54
+ if (arg_is_const(op->args[2])) {
55
+ uint64_t tv = arg_info(op->args[2])->val;
56
+ if (tv == -1) {
57
+ op->opc = INDEX_op_or_vec;
58
+ op->args[2] = op->args[3];
59
+ return fold_or(ctx, op);
60
+ }
61
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
62
+ op->opc = INDEX_op_andc_vec;
63
+ op->args[2] = op->args[1];
64
+ op->args[1] = op->args[3];
65
+ return fold_andc(ctx, op);
66
+ }
67
+ }
68
+ if (arg_is_const(op->args[3])) {
69
+ uint64_t fv = arg_info(op->args[3])->val;
70
+ if (fv == 0) {
71
+ op->opc = INDEX_op_and_vec;
72
+ return fold_and(ctx, op);
73
+ }
74
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
75
+ op->opc = INDEX_op_orc_vec;
76
+ op->args[2] = op->args[1];
77
+ op->args[1] = op->args[3];
78
+ return fold_orc(ctx, op);
79
+ }
80
+ }
81
+ return finish_folding(ctx, op);
86
+}
82
+}
87
+
83
+
88
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
89
const TCGArg args[TCG_MAX_OP_ARGS],
90
const int const_args[TCG_MAX_OP_ARGS])
91
@@ -XXX,XX +XXX,XX @@ void tcg_register_jit(const void *buf, size_t buf_size)
92
{
85
{
93
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
86
int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
87
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
88
return fold_masks_zs(ctx, op, z_mask, s_mask);
94
}
89
}
95
-
90
96
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
91
-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
97
- uintptr_t jmp_rx, uintptr_t jmp_rw)
98
-{
92
-{
99
- uintptr_t addr = tb->jmp_target_addr[n];
93
- /* If true and false values are the same, eliminate the cmp. */
100
- intptr_t tb_disp = addr - (uintptr_t)tb->tc.ptr;
94
- if (args_are_copies(op->args[2], op->args[3])) {
101
- intptr_t br_disp = addr - jmp_rx;
95
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
102
- tcg_insn_unit i1, i2;
103
-
104
- /* We can reach the entire address space for ILP32.
105
- For LP64, the code_gen_buffer can't be larger than 2GB. */
106
- tcg_debug_assert(tb_disp == (int32_t)tb_disp);
107
- tcg_debug_assert(br_disp == (int32_t)br_disp);
108
-
109
- /* This does not exercise the range of the branch, but we do
110
- still need to be able to load the new value of TCG_REG_TB.
111
- But this does still happen quite often. */
112
- if (check_fit_ptr(tb_disp, 13)) {
113
- /* ba,pt %icc, addr */
114
- i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
115
- | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
116
- i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
117
- | INSN_IMM13(tb_disp));
118
- } else if (tb_disp >= 0) {
119
- i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
120
- i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
121
- | INSN_IMM13(tb_disp & 0x3ff));
122
- } else {
123
- i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
124
- i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
125
- | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
126
- }
96
- }
127
-
97
-
128
- qatomic_set((uint64_t *)jmp_rw, deposit64(i2, 32, 32, i1));
98
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
129
- flush_idcache_range(jmp_rx, jmp_rw, 8);
99
- uint64_t tv = arg_info(op->args[2])->val;
100
- uint64_t fv = arg_info(op->args[3])->val;
101
-
102
- if (tv == -1 && fv == 0) {
103
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
104
- }
105
- if (tv == 0 && fv == -1) {
106
- if (TCG_TARGET_HAS_not_vec) {
107
- op->opc = INDEX_op_not_vec;
108
- return fold_not(ctx, op);
109
- } else {
110
- op->opc = INDEX_op_xor_vec;
111
- op->args[2] = arg_new_constant(ctx, -1);
112
- return fold_xor(ctx, op);
113
- }
114
- }
115
- }
116
- if (arg_is_const(op->args[2])) {
117
- uint64_t tv = arg_info(op->args[2])->val;
118
- if (tv == -1) {
119
- op->opc = INDEX_op_or_vec;
120
- op->args[2] = op->args[3];
121
- return fold_or(ctx, op);
122
- }
123
- if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
124
- op->opc = INDEX_op_andc_vec;
125
- op->args[2] = op->args[1];
126
- op->args[1] = op->args[3];
127
- return fold_andc(ctx, op);
128
- }
129
- }
130
- if (arg_is_const(op->args[3])) {
131
- uint64_t fv = arg_info(op->args[3])->val;
132
- if (fv == 0) {
133
- op->opc = INDEX_op_and_vec;
134
- return fold_and(ctx, op);
135
- }
136
- if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
137
- op->opc = INDEX_op_orc_vec;
138
- op->args[2] = op->args[1];
139
- op->args[1] = op->args[3];
140
- return fold_orc(ctx, op);
141
- }
142
- }
143
- return finish_folding(ctx, op);
130
-}
144
-}
145
-
146
/* Propagate constants and copies, fold constant expressions. */
147
void tcg_optimize(TCGContext *s)
148
{
131
--
149
--
132
2.34.1
150
2.43.0
133
134
1
Replace 'tc_ptr' and 'addr' with 'tb' and 'n'.
1
The big comment just above says functions should be sorted.
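
For reference, the prototype change to tb_target_set_jmp_target, as the header hunks below spell out (a summary sketch, not an additional hunk):

    /* Old: the caller pre-computes the target address and passes raw
     * pointers. */
    void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
                                  uintptr_t jmp_rw, uintptr_t addr);

    /* New: pass the (rx view of the) TB plus the jump slot index; each
     * backend derives the address from tb->jmp_target_addr[n] and
     * tb->tc.ptr itself. */
    void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                                  uintptr_t jmp_rx, uintptr_t jmp_rw);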
2
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
5
---
6
tcg/aarch64/tcg-target.h | 3 ++-
6
tcg/optimize.c | 60 +++++++++++++++++++++++++-------------------------
7
tcg/arm/tcg-target.h | 3 ++-
7
1 file changed, 30 insertions(+), 30 deletions(-)
8
tcg/i386/tcg-target.h | 9 ++-------
9
tcg/loongarch64/tcg-target.h | 3 ++-
10
tcg/mips/tcg-target.h | 3 ++-
11
tcg/ppc/tcg-target.h | 3 ++-
12
tcg/riscv/tcg-target.h | 3 ++-
13
tcg/s390x/tcg-target.h | 10 ++--------
14
tcg/sparc64/tcg-target.h | 3 ++-
15
tcg/tci/tcg-target.h | 3 ++-
16
accel/tcg/cpu-exec.c | 11 ++++++++---
17
tcg/aarch64/tcg-target.c.inc | 5 +++--
18
tcg/i386/tcg-target.c.inc | 9 +++++++++
19
tcg/loongarch64/tcg-target.c.inc | 5 +++--
20
tcg/ppc/tcg-target.c.inc | 7 ++++---
21
tcg/s390x/tcg-target.c.inc | 10 ++++++++++
22
tcg/sparc64/tcg-target.c.inc | 7 ++++---
23
17 files changed, 61 insertions(+), 36 deletions(-)
24
8
25
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
26
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
27
--- a/tcg/aarch64/tcg-target.h
11
--- a/tcg/optimize.c
28
+++ b/tcg/aarch64/tcg-target.h
12
+++ b/tcg/optimize.c
29
@@ -XXX,XX +XXX,XX @@ typedef enum {
13
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
30
#define TCG_TARGET_DEFAULT_MO (0)
14
return true;
31
#define TCG_TARGET_HAS_MEMORY_BSWAP 0
32
33
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
34
+void tb_target_set_jmp_target(const TranslationBlock *, int,
35
+ uintptr_t, uintptr_t);
36
37
#define TCG_TARGET_NEED_LDST_LABELS
38
#define TCG_TARGET_NEED_POOL_LABELS
39
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
40
index XXXXXXX..XXXXXXX 100644
41
--- a/tcg/arm/tcg-target.h
42
+++ b/tcg/arm/tcg-target.h
43
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
44
#define TCG_TARGET_HAS_MEMORY_BSWAP 0
45
46
/* not defined -- call should be eliminated at compile time */
47
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
48
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
49
+ uintptr_t, uintptr_t);
50
51
#define TCG_TARGET_NEED_LDST_LABELS
52
#define TCG_TARGET_NEED_POOL_LABELS
53
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
54
index XXXXXXX..XXXXXXX 100644
55
--- a/tcg/i386/tcg-target.h
56
+++ b/tcg/i386/tcg-target.h
57
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
58
#define TCG_TARGET_extract_i64_valid(ofs, len) \
59
(((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32)
60
61
-static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
62
- uintptr_t jmp_rw, uintptr_t addr)
63
-{
64
- /* patch the branch destination */
65
- qatomic_set((int32_t *)jmp_rw, addr - (jmp_rx + 4));
66
- /* no need to flush icache explicitly */
67
-}
68
+void tb_target_set_jmp_target(const TranslationBlock *, int,
69
+ uintptr_t, uintptr_t);
70
71
/* This defines the natural memory order supported by this
72
* architecture before guarantees made by various barrier
73
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
74
index XXXXXXX..XXXXXXX 100644
75
--- a/tcg/loongarch64/tcg-target.h
76
+++ b/tcg/loongarch64/tcg-target.h
77
@@ -XXX,XX +XXX,XX @@ typedef enum {
78
#define TCG_TARGET_HAS_muluh_i64 1
79
#define TCG_TARGET_HAS_mulsh_i64 1
80
81
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
82
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
83
+ uintptr_t, uintptr_t);
84
85
#define TCG_TARGET_DEFAULT_MO (0)
86
87
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
88
index XXXXXXX..XXXXXXX 100644
89
--- a/tcg/mips/tcg-target.h
90
+++ b/tcg/mips/tcg-target.h
91
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
92
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
93
94
/* not defined -- call should be eliminated at compile time */
95
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t)
96
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
97
+ uintptr_t, uintptr_t)
98
QEMU_ERROR("code path is reachable");
99
100
#define TCG_TARGET_NEED_LDST_LABELS
101
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
102
index XXXXXXX..XXXXXXX 100644
103
--- a/tcg/ppc/tcg-target.h
104
+++ b/tcg/ppc/tcg-target.h
105
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;
106
#define TCG_TARGET_HAS_bitsel_vec have_vsx
107
#define TCG_TARGET_HAS_cmpsel_vec 0
108
109
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
110
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
111
+ uintptr_t, uintptr_t);
112
113
#define TCG_TARGET_DEFAULT_MO (0)
114
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
115
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
116
index XXXXXXX..XXXXXXX 100644
117
--- a/tcg/riscv/tcg-target.h
118
+++ b/tcg/riscv/tcg-target.h
119
@@ -XXX,XX +XXX,XX @@ typedef enum {
120
#endif
121
122
/* not defined -- call should be eliminated at compile time */
123
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
124
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
125
+ uintptr_t, uintptr_t);
126
127
#define TCG_TARGET_DEFAULT_MO (0)
128
129
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
130
index XXXXXXX..XXXXXXX 100644
131
--- a/tcg/s390x/tcg-target.h
132
+++ b/tcg/s390x/tcg-target.h
133
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
134
135
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
136
137
-static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
138
- uintptr_t jmp_rw, uintptr_t addr)
139
-{
140
- /* patch the branch destination */
141
- intptr_t disp = addr - (jmp_rx - 2);
142
- qatomic_set((int32_t *)jmp_rw, disp / 2);
143
- /* no need to flush icache explicitly */
144
-}
145
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
146
+ uintptr_t jmp_rx, uintptr_t jmp_rw);
147
148
#define TCG_TARGET_NEED_LDST_LABELS
149
#define TCG_TARGET_NEED_POOL_LABELS
150
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
151
index XXXXXXX..XXXXXXX 100644
152
--- a/tcg/sparc64/tcg-target.h
153
+++ b/tcg/sparc64/tcg-target.h
154
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
155
#define TCG_TARGET_DEFAULT_MO (0)
156
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
157
158
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
159
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
160
+ uintptr_t, uintptr_t);
161
162
#define TCG_TARGET_NEED_POOL_LABELS
163
164
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
165
index XXXXXXX..XXXXXXX 100644
166
--- a/tcg/tci/tcg-target.h
167
+++ b/tcg/tci/tcg-target.h
168
@@ -XXX,XX +XXX,XX @@ typedef enum {
169
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
170
171
/* not defined -- call should be eliminated at compile time */
172
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
173
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
174
+ uintptr_t, uintptr_t);
175
176
#endif /* TCG_TARGET_H */
177
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
178
index XXXXXXX..XXXXXXX 100644
179
--- a/accel/tcg/cpu-exec.c
180
+++ b/accel/tcg/cpu-exec.c
181
@@ -XXX,XX +XXX,XX @@ void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
182
{
183
tb->jmp_target_addr[n] = addr;
184
if (TCG_TARGET_HAS_direct_jump) {
185
+ /*
186
+ * Get the rx view of the structure, from which we find the
187
+ * executable code address, and tb_target_set_jmp_target can
188
+ * produce a pc-relative displacement to jmp_target_addr[n].
189
+ */
190
+ const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
191
uintptr_t offset = tb->jmp_insn_offset[n];
192
- uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
193
- uintptr_t jmp_rx = tc_ptr + offset;
194
+ uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
195
uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
196
- tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr);
197
+ tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
198
}
199
}
15
}
200
16
201
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
17
+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
202
index XXXXXXX..XXXXXXX 100644
203
--- a/tcg/aarch64/tcg-target.c.inc
204
+++ b/tcg/aarch64/tcg-target.c.inc
205
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
206
tcg_out_call_int(s, target);
207
}
208
209
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
210
- uintptr_t jmp_rw, uintptr_t addr)
211
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
212
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
213
{
214
+ uintptr_t addr = tb->jmp_target_addr[n];
215
tcg_insn_unit i1, i2;
216
TCGType rt = TCG_TYPE_I64;
217
TCGReg rd = TCG_REG_TMP;
218
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
219
index XXXXXXX..XXXXXXX 100644
220
--- a/tcg/i386/tcg-target.c.inc
221
+++ b/tcg/i386/tcg-target.c.inc
222
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
223
set_jmp_reset_offset(s, which);
224
}
225
226
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
227
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
228
+{
18
+{
229
+ /* patch the branch destination */
19
+ /* Canonicalize the comparison to put immediate second. */
230
+ uintptr_t addr = tb->jmp_target_addr[n];
20
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
231
+ qatomic_set((int32_t *)jmp_rw, addr - (jmp_rx + 4));
21
+ op->args[3] = tcg_swap_cond(op->args[3]);
232
+ /* no need to flush icache explicitly */
22
+ }
23
+ return finish_folding(ctx, op);
233
+}
24
+}
234
+
25
+
235
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
26
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
236
const TCGArg args[TCG_MAX_OP_ARGS],
237
const int const_args[TCG_MAX_OP_ARGS])
238
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
239
index XXXXXXX..XXXXXXX 100644
240
--- a/tcg/loongarch64/tcg-target.c.inc
241
+++ b/tcg/loongarch64/tcg-target.c.inc
242
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nop(TCGContext *s)
243
tcg_out32(s, NOP);
244
}
245
246
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
247
- uintptr_t jmp_rw, uintptr_t addr)
248
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
249
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
250
{
251
tcg_insn_unit i1, i2;
252
ptrdiff_t upper, lower;
253
+ uintptr_t addr = tb->jmp_target_addr[n];
254
ptrdiff_t offset = (ptrdiff_t)(addr - jmp_rx) >> 2;
255
256
if (offset == sextreg(offset, 0, 26)) {
257
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
258
index XXXXXXX..XXXXXXX 100644
259
--- a/tcg/ppc/tcg-target.c.inc
260
+++ b/tcg/ppc/tcg-target.c.inc
261
@@ -XXX,XX +XXX,XX @@ static inline void ppc64_replace4(uintptr_t rx, uintptr_t rw,
262
flush_idcache_range(rx, rw, 16);
263
}
264
265
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
266
- uintptr_t jmp_rw, uintptr_t addr)
267
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
268
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
269
{
270
tcg_insn_unit i0, i1, i2, i3;
271
- intptr_t tb_diff = addr - tc_ptr;
272
+ uintptr_t addr = tb->jmp_target_addr[n];
273
+ intptr_t tb_diff = addr - (uintptr_t)tb->tc.ptr;
274
intptr_t br_diff = addr - (jmp_rx + 4);
275
intptr_t lo, hi;
276
277
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
278
index XXXXXXX..XXXXXXX 100644
279
--- a/tcg/s390x/tcg-target.c.inc
280
+++ b/tcg/s390x/tcg-target.c.inc
281
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
282
set_jmp_reset_offset(s, which);
283
}
284
285
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
286
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
287
+{
27
+{
288
+ /* patch the branch destination */
28
+ /* If true and false values are the same, eliminate the cmp. */
289
+ uintptr_t addr = tb->jmp_target_addr[n];
29
+ if (args_are_copies(op->args[3], op->args[4])) {
290
+ intptr_t disp = addr - (jmp_rx - 2);
30
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
291
+ qatomic_set((int32_t *)jmp_rw, disp / 2);
31
+ }
292
+ /* no need to flush icache explicitly */
32
+
33
+ /* Canonicalize the comparison to put immediate second. */
34
+ if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
35
+ op->args[5] = tcg_swap_cond(op->args[5]);
36
+ }
37
+ /*
38
+ * Canonicalize the "false" input reg to match the destination,
39
+ * so that the tcg backend can implement "move if true".
40
+ */
41
+ if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
42
+ op->args[5] = tcg_invert_cond(op->args[5]);
43
+ }
44
+ return finish_folding(ctx, op);
293
+}
45
+}
294
+
46
+
295
# define OP_32_64(x) \
47
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
296
case glue(glue(INDEX_op_,x),_i32): \
48
{
297
case glue(glue(INDEX_op_,x),_i64)
49
uint64_t z_mask, s_mask;
298
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
50
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
299
index XXXXXXX..XXXXXXX 100644
51
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
300
--- a/tcg/sparc64/tcg-target.c.inc
301
+++ b/tcg/sparc64/tcg-target.c.inc
302
@@ -XXX,XX +XXX,XX @@ void tcg_register_jit(const void *buf, size_t buf_size)
303
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
304
}
52
}
305
53
306
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
54
-static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
307
- uintptr_t jmp_rw, uintptr_t addr)
55
-{
308
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
56
- /* Canonicalize the comparison to put immediate second. */
309
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
57
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
58
- op->args[3] = tcg_swap_cond(op->args[3]);
59
- }
60
- return finish_folding(ctx, op);
61
-}
62
-
63
-static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
64
-{
65
- /* If true and false values are the same, eliminate the cmp. */
66
- if (args_are_copies(op->args[3], op->args[4])) {
67
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
68
- }
69
-
70
- /* Canonicalize the comparison to put immediate second. */
71
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
72
- op->args[5] = tcg_swap_cond(op->args[5]);
73
- }
74
- /*
75
- * Canonicalize the "false" input reg to match the destination,
76
- * so that the tcg backend can implement "move if true".
77
- */
78
- if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
79
- op->args[5] = tcg_invert_cond(op->args[5]);
80
- }
81
- return finish_folding(ctx, op);
82
-}
83
-
84
static bool fold_sextract(OptContext *ctx, TCGOp *op)
310
{
85
{
311
- intptr_t tb_disp = addr - tc_ptr;
86
uint64_t z_mask, s_mask, s_mask_old;
312
+ uintptr_t addr = tb->jmp_target_addr[n];
313
+ intptr_t tb_disp = addr - (uintptr_t)tb->tc.ptr;
314
intptr_t br_disp = addr - jmp_rx;
315
tcg_insn_unit i1, i2;
316
317
--
87
--
318
2.34.1
88
2.43.0
319
320
1
Similar to the existing set_jmp_reset_offset. Include the
1
We currently have a flag, float_muladd_halve_result, to scale
2
rw->rx address space conversion done by arm and s390x, and
2
the result by 2**-1. Extend this to handle arbitrary scaling.
3
forgotten by mips and riscv.
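
For the float_muladd_scalbn change, a quick sketch of how the new interface relates to the old flag. This is illustrative only: the two wrapper names are invented, but the calls are the ones declared in the hunks below.

    #include "qemu/osdep.h"
    #include "fpu/softfloat.h"

    /* Both compute (a * b + c) * 2**-1 with a single rounding step:
     * the old flag as a special case, the new interface via scale = -1.
     * A scale of 0 behaves exactly like plain float64_muladd(). */
    static float64 halve_muladd_old(float64 a, float64 b, float64 c,
                                    float_status *s)
    {
        return float64_muladd(a, b, c, float_muladd_halve_result, s);
    }

    static float64 halve_muladd_new(float64 a, float64 b, float64 c,
                                    float_status *s)
    {
        return float64_muladd_scalbn(a, b, c, -1, 0, s);
    }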
3
4
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
6
---
9
tcg/tcg.c | 9 +++++++++
7
include/fpu/softfloat.h | 6 ++++
10
tcg/arm/tcg-target.c.inc | 2 +-
8
fpu/softfloat.c | 58 ++++++++++++++++++++++-----------------
11
tcg/mips/tcg-target.c.inc | 2 +-
9
fpu/softfloat-parts.c.inc | 7 +++--
12
tcg/riscv/tcg-target.c.inc | 2 +-
10
3 files changed, 44 insertions(+), 27 deletions(-)
13
tcg/tci/tcg-target.c.inc | 2 +-
11
14
5 files changed, 13 insertions(+), 4 deletions(-)
12
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
15
16
diff --git a/tcg/tcg.c b/tcg/tcg.c
17
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/tcg.c
14
--- a/include/fpu/softfloat.h
19
+++ b/tcg/tcg.c
15
+++ b/include/fpu/softfloat.h
20
@@ -XXX,XX +XXX,XX @@ static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
16
@@ -XXX,XX +XXX,XX @@ float16 float16_add(float16, float16, float_status *status);
21
s->tb_jmp_insn_offset[which] = tcg_current_code_size(s);
17
float16 float16_sub(float16, float16, float_status *status);
22
}
18
float16 float16_mul(float16, float16, float_status *status);
23
19
float16 float16_muladd(float16, float16, float16, int, float_status *status);
24
+static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
20
+float16 float16_muladd_scalbn(float16, float16, float16,
21
+ int, int, float_status *status);
22
float16 float16_div(float16, float16, float_status *status);
23
float16 float16_scalbn(float16, int, float_status *status);
24
float16 float16_min(float16, float16, float_status *status);
25
@@ -XXX,XX +XXX,XX @@ float32 float32_mul(float32, float32, float_status *status);
26
float32 float32_div(float32, float32, float_status *status);
27
float32 float32_rem(float32, float32, float_status *status);
28
float32 float32_muladd(float32, float32, float32, int, float_status *status);
29
+float32 float32_muladd_scalbn(float32, float32, float32,
30
+ int, int, float_status *status);
31
float32 float32_sqrt(float32, float_status *status);
32
float32 float32_exp2(float32, float_status *status);
33
float32 float32_log2(float32, float_status *status);
34
@@ -XXX,XX +XXX,XX @@ float64 float64_mul(float64, float64, float_status *status);
35
float64 float64_div(float64, float64, float_status *status);
36
float64 float64_rem(float64, float64, float_status *status);
37
float64 float64_muladd(float64, float64, float64, int, float_status *status);
38
+float64 float64_muladd_scalbn(float64, float64, float64,
39
+ int, int, float_status *status);
40
float64 float64_sqrt(float64, float_status *status);
41
float64 float64_log2(float64, float_status *status);
42
FloatRelation float64_compare(float64, float64, float_status *status);
43
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/fpu/softfloat.c
46
+++ b/fpu/softfloat.c
47
@@ -XXX,XX +XXX,XX @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
48
#define parts_mul(A, B, S) \
49
PARTS_GENERIC_64_128(mul, A)(A, B, S)
50
51
-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
52
- FloatParts64 *c, int flags,
53
- float_status *s);
54
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
55
- FloatParts128 *c, int flags,
56
- float_status *s);
57
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
58
+ FloatParts64 *c, int scale,
59
+ int flags, float_status *s);
60
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
61
+ FloatParts128 *c, int scale,
62
+ int flags, float_status *s);
63
64
-#define parts_muladd(A, B, C, Z, S) \
65
- PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
66
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
67
+ PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)
68
69
static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
70
float_status *s);
71
@@ -XXX,XX +XXX,XX @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
72
* Fused multiply-add
73
*/
74
75
-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
76
- int flags, float_status *status)
77
+float16 QEMU_FLATTEN
78
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
79
+ int scale, int flags, float_status *status)
80
{
81
FloatParts64 pa, pb, pc, *pr;
82
83
float16_unpack_canonical(&pa, a, status);
84
float16_unpack_canonical(&pb, b, status);
85
float16_unpack_canonical(&pc, c, status);
86
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
87
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
88
89
return float16_round_pack_canonical(pr, status);
90
}
91
92
-static float32 QEMU_SOFTFLOAT_ATTR
93
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
94
- float_status *status)
95
+float16 float16_muladd(float16 a, float16 b, float16 c,
96
+ int flags, float_status *status)
25
+{
97
+{
26
+ /*
98
+ return float16_muladd_scalbn(a, b, c, 0, flags, status);
27
+ * Return the read-execute version of the pointer, for the benefit
28
+ * of any pc-relative addressing mode.
29
+ */
30
+ return (uintptr_t)tcg_splitwx_to_rx(&s->tb_jmp_target_addr[which]);
31
+}
99
+}
32
+
100
+
33
/* Signal overflow, starting over with fewer guest insns. */
101
+float32 QEMU_SOFTFLOAT_ATTR
34
static G_NORETURN
102
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
35
void tcg_raise_tb_overflow(TCGContext *s)
103
+ int scale, int flags, float_status *status)
36
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
104
{
105
FloatParts64 pa, pb, pc, *pr;
106
107
float32_unpack_canonical(&pa, a, status);
108
float32_unpack_canonical(&pb, b, status);
109
float32_unpack_canonical(&pc, c, status);
110
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
111
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
112
113
return float32_round_pack_canonical(pr, status);
114
}
115
116
-static float64 QEMU_SOFTFLOAT_ATTR
117
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
118
- float_status *status)
119
+float64 QEMU_SOFTFLOAT_ATTR
120
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
121
+ int scale, int flags, float_status *status)
122
{
123
FloatParts64 pa, pb, pc, *pr;
124
125
float64_unpack_canonical(&pa, a, status);
126
float64_unpack_canonical(&pb, b, status);
127
float64_unpack_canonical(&pc, c, status);
128
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
129
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);
130
131
return float64_round_pack_canonical(pr, status);
132
}
133
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
134
return ur.s;
135
136
soft:
137
- return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
138
+ return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
139
}
140
141
float64 QEMU_FLATTEN
142
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
143
return ur.s;
144
145
soft:
146
- return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
147
+ return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
148
}
149
150
float64 float64r32_muladd(float64 a, float64 b, float64 c,
151
@@ -XXX,XX +XXX,XX @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
152
float64_unpack_canonical(&pa, a, status);
153
float64_unpack_canonical(&pb, b, status);
154
float64_unpack_canonical(&pc, c, status);
155
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
156
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
157
158
return float64r32_round_pack_canonical(pr, status);
159
}
160
@@ -XXX,XX +XXX,XX @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
161
bfloat16_unpack_canonical(&pa, a, status);
162
bfloat16_unpack_canonical(&pb, b, status);
163
bfloat16_unpack_canonical(&pc, c, status);
164
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
165
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
166
167
return bfloat16_round_pack_canonical(pr, status);
168
}
169
@@ -XXX,XX +XXX,XX @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
170
float128_unpack_canonical(&pa, a, status);
171
float128_unpack_canonical(&pb, b, status);
172
float128_unpack_canonical(&pc, c, status);
173
- pr = parts_muladd(&pa, &pb, &pc, flags, status);
174
+ pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);
175
176
return float128_round_pack_canonical(pr, status);
177
}
178
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)
179
180
float64_unpack_canonical(&rp, float64_one, status);
181
for (i = 0 ; i < 15 ; i++) {
182
+
183
float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
184
- rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
185
+ rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
186
xnp = *parts_mul(&xnp, &xp, status);
187
}
188
189
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
37
index XXXXXXX..XXXXXXX 100644
190
index XXXXXXX..XXXXXXX 100644
38
--- a/tcg/arm/tcg-target.c.inc
191
--- a/fpu/softfloat-parts.c.inc
39
+++ b/tcg/arm/tcg-target.c.inc
192
+++ b/fpu/softfloat-parts.c.inc
40
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
193
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
41
TCGReg base = TCG_REG_PC;
194
* Requires A and C extracted into a double-sized structure to provide the
42
195
* extra space for the widening multiply.
43
qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
196
*/
44
- ptr = (intptr_t)tcg_splitwx_to_rx(s->tb_jmp_target_addr + args[0]);
197
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
45
+ ptr = get_jmp_target_addr(s, args[0]);
198
- FloatPartsN *c, int flags, float_status *s)
46
dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
199
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
47
dil = sextract32(dif, 0, 12);
200
+ FloatPartsN *c, int scale,
48
if (dif != dil) {
201
+ int flags, float_status *s)
49
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
202
{
50
index XXXXXXX..XXXXXXX 100644
203
int ab_mask, abc_mask;
51
--- a/tcg/mips/tcg-target.c.inc
204
FloatPartsW p_widen, c_widen;
52
+++ b/tcg/mips/tcg-target.c.inc
205
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
53
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
206
a->exp = p_widen.exp;
54
/* indirect jump method */
207
55
qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
208
return_normal:
56
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
209
+ /* TODO: Replace all use of float_muladd_halve_result with scale. */
57
- (uintptr_t)(s->tb_jmp_target_addr + a0));
210
if (flags & float_muladd_halve_result) {
58
+ get_jmp_target_addr(s, a0));
211
a->exp -= 1;
59
tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
212
}
60
tcg_out_nop(s);
213
+ a->exp += scale;
61
set_jmp_reset_offset(s, a0);
214
finish_sign:
62
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
215
if (flags & float_muladd_negate_result) {
63
index XXXXXXX..XXXXXXX 100644
216
a->sign ^= 1;
64
--- a/tcg/riscv/tcg-target.c.inc
65
+++ b/tcg/riscv/tcg-target.c.inc
66
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
67
qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
68
/* indirect jump method */
69
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
70
- (uintptr_t)(s->tb_jmp_target_addr + a0));
71
+ get_jmp_target_addr(s, a0));
72
tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
73
set_jmp_reset_offset(s, a0);
74
break;
75
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
76
index XXXXXXX..XXXXXXX 100644
77
--- a/tcg/tci/tcg-target.c.inc
78
+++ b/tcg/tci/tcg-target.c.inc
79
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
80
case INDEX_op_goto_tb:
81
qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
82
/* indirect jump method. */
83
- tcg_out_op_p(s, opc, s->tb_jmp_target_addr + args[0]);
84
+ tcg_out_op_p(s, opc, (void *)get_jmp_target_addr(s, args[0]));
85
set_jmp_reset_offset(s, args[0]);
86
break;
87
88
--
217
--
89
2.34.1
218
2.43.0
90
219
91
220
1
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
1
Use the scalbn interface instead of float_muladd_halve_result.
2
2
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
---
5
tcg/riscv/tcg-target.c.inc | 3 ++-
6
target/arm/tcg/helper-a64.c | 6 +++---
6
1 file changed, 2 insertions(+), 1 deletion(-)
7
1 file changed, 3 insertions(+), 3 deletions(-)
7
8
8
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
9
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/riscv/tcg-target.c.inc
11
--- a/target/arm/tcg/helper-a64.c
11
+++ b/tcg/riscv/tcg-target.c.inc
12
+++ b/target/arm/tcg/helper-a64.c
12
@@ -XXX,XX +XXX,XX @@ typedef enum {
13
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst)
13
#endif
14
(float16_is_infinity(b) && float16_is_zero(a))) {
14
15
return float16_one_point_five;
15
OPC_FENCE = 0x0000000f,
16
+ OPC_NOP = OPC_ADDI, /* nop = addi r0,r0,0 */
17
} RISCVInsn;
18
19
/*
20
@@ -XXX,XX +XXX,XX @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
21
{
22
int i;
23
for (i = 0; i < count; ++i) {
24
- p[i] = encode_i(OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);
25
+ p[i] = OPC_NOP;
26
}
16
}
17
- return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
18
+ return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);
27
}
19
}
28
20
21
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
22
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
23
(float32_is_infinity(b) && float32_is_zero(a))) {
24
return float32_one_point_five;
25
}
26
- return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
27
+ return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst);
28
}
29
30
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
31
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
32
(float64_is_infinity(b) && float64_is_zero(a))) {
33
return float64_one_point_five;
34
}
35
- return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
36
+ return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
37
}
38
39
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
29
--
40
--
30
2.34.1
41
2.43.0
31
42
32
43
diff view generated by jsdifflib
1
Test TCG_TARGET_HAS_direct_jump instead of testing an
1
Use the scalbn interface instead of float_muladd_halve_result.
2
implementation pointer.
2
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
tcg/aarch64/tcg-target.c.inc | 2 +-
6
target/sparc/helper.h | 4 +-
9
tcg/arm/tcg-target.c.inc | 2 +-
7
target/sparc/fop_helper.c | 8 ++--
10
tcg/loongarch64/tcg-target.c.inc | 2 +-
8
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
11
tcg/mips/tcg-target.c.inc | 2 +-
9
3 files changed, 54 insertions(+), 38 deletions(-)
12
tcg/riscv/tcg-target.c.inc | 2 +-
10
13
tcg/tci/tcg-target.c.inc | 2 +-
11
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
14
6 files changed, 6 insertions(+), 6 deletions(-)
15
16
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
17
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/aarch64/tcg-target.c.inc
13
--- a/target/sparc/helper.h
19
+++ b/tcg/aarch64/tcg-target.c.inc
14
+++ b/target/sparc/helper.h
20
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
21
16
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
22
switch (opc) {
17
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
23
case INDEX_op_goto_tb:
18
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
24
- tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
19
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
25
+ qemu_build_assert(TCG_TARGET_HAS_direct_jump);
20
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
26
/*
21
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
27
* Ensure that ADRP+ADD are 8-byte aligned so that an atomic
22
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
28
* write can be used to patch the target address.
23
29
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
25
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
26
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
27
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
28
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
29
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
30
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
31
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
32
33
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
30
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
31
--- a/tcg/arm/tcg-target.c.inc
35
--- a/target/sparc/fop_helper.c
32
+++ b/tcg/arm/tcg-target.c.inc
36
+++ b/target/sparc/fop_helper.c
33
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
37
@@ -XXX,XX +XXX,XX @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
34
intptr_t ptr, dif, dil;
38
}
35
TCGReg base = TCG_REG_PC;
39
36
40
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
37
- tcg_debug_assert(s->tb_jmp_insn_offset == 0);
41
- float32 s2, float32 s3, uint32_t op)
38
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
42
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
39
ptr = (intptr_t)tcg_splitwx_to_rx(s->tb_jmp_target_addr + args[0]);
43
{
40
dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
44
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
41
dil = sextract32(dif, 0, 12);
45
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
42
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
46
check_ieee_exceptions(env, GETPC());
47
return ret;
48
}
49
50
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
51
- float64 s2, float64 s3, uint32_t op)
52
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
53
{
54
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
55
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
56
check_ieee_exceptions(env, GETPC());
57
return ret;
58
}
59
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
43
index XXXXXXX..XXXXXXX 100644
60
index XXXXXXX..XXXXXXX 100644
44
--- a/tcg/loongarch64/tcg-target.c.inc
61
--- a/target/sparc/translate.c
45
+++ b/tcg/loongarch64/tcg-target.c.inc
62
+++ b/target/sparc/translate.c
46
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
63
@@ -XXX,XX +XXX,XX @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
47
64
48
switch (opc) {
65
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
49
case INDEX_op_goto_tb:
66
{
50
- tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
67
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
51
+ qemu_build_assert(TCG_TARGET_HAS_direct_jump);
68
+ TCGv_i32 z = tcg_constant_i32(0);
52
/*
69
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
53
* Ensure that patch area is 8-byte aligned so that an
70
}
54
* atomic write can be used to patch the target address.
71
55
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
72
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
56
index XXXXXXX..XXXXXXX 100644
73
{
57
--- a/tcg/mips/tcg-target.c.inc
74
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
58
+++ b/tcg/mips/tcg-target.c.inc
75
+ TCGv_i32 z = tcg_constant_i32(0);
59
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
76
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
60
switch (opc) {
77
}
61
case INDEX_op_goto_tb:
78
62
/* indirect jump method */
79
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
63
- tcg_debug_assert(s->tb_jmp_insn_offset == 0);
80
{
64
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
81
- int op = float_muladd_negate_c;
65
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
82
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
66
(uintptr_t)(s->tb_jmp_target_addr + a0));
83
+ TCGv_i32 z = tcg_constant_i32(0);
67
tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
84
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
68
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
85
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
69
index XXXXXXX..XXXXXXX 100644
86
}
70
--- a/tcg/riscv/tcg-target.c.inc
87
71
+++ b/tcg/riscv/tcg-target.c.inc
88
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
72
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
89
{
73
90
- int op = float_muladd_negate_c;
74
switch (opc) {
91
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
75
case INDEX_op_goto_tb:
92
+ TCGv_i32 z = tcg_constant_i32(0);
76
- assert(s->tb_jmp_insn_offset == 0);
93
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
77
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
94
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
78
/* indirect jump method */
95
}
79
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
96
80
(uintptr_t)(s->tb_jmp_target_addr + a0));
97
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
81
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
98
{
82
index XXXXXXX..XXXXXXX 100644
99
- int op = float_muladd_negate_c | float_muladd_negate_result;
83
--- a/tcg/tci/tcg-target.c.inc
100
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
84
+++ b/tcg/tci/tcg-target.c.inc
101
+ TCGv_i32 z = tcg_constant_i32(0);
85
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
102
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
86
103
+ float_muladd_negate_result);
87
switch (opc) {
104
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
88
case INDEX_op_goto_tb:
105
}
89
- tcg_debug_assert(s->tb_jmp_insn_offset == 0);
106
90
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
107
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
91
/* indirect jump method. */
108
{
92
tcg_out_op_p(s, opc, s->tb_jmp_target_addr + args[0]);
109
- int op = float_muladd_negate_c | float_muladd_negate_result;
93
set_jmp_reset_offset(s, args[0]);
110
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
111
+ TCGv_i32 z = tcg_constant_i32(0);
112
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
113
+ float_muladd_negate_result);
114
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
115
}
116
117
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
118
{
119
- int op = float_muladd_negate_result;
120
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
121
+ TCGv_i32 z = tcg_constant_i32(0);
122
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
123
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
124
}
125
126
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
127
{
128
- int op = float_muladd_negate_result;
129
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
130
+ TCGv_i32 z = tcg_constant_i32(0);
131
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
132
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
133
}
134
135
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
136
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
137
{
138
- TCGv_i32 one = tcg_constant_i32(float32_one);
139
- int op = float_muladd_halve_result;
140
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
141
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
142
+ TCGv_i32 mone = tcg_constant_i32(-1);
143
+ TCGv_i32 op = tcg_constant_i32(0);
144
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
145
}
146
147
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
148
{
149
- TCGv_i64 one = tcg_constant_i64(float64_one);
150
- int op = float_muladd_halve_result;
151
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
152
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
153
+ TCGv_i32 mone = tcg_constant_i32(-1);
154
+ TCGv_i32 op = tcg_constant_i32(0);
155
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
156
}
157
158
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
159
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
160
{
161
- TCGv_i32 one = tcg_constant_i32(float32_one);
162
- int op = float_muladd_negate_c | float_muladd_halve_result;
163
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
164
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
165
+ TCGv_i32 mone = tcg_constant_i32(-1);
166
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
167
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
168
}
169
170
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
171
{
172
- TCGv_i64 one = tcg_constant_i64(float64_one);
173
- int op = float_muladd_negate_c | float_muladd_halve_result;
174
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
175
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
176
+ TCGv_i32 mone = tcg_constant_i32(-1);
177
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
178
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
179
}
180
181
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
182
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
183
{
184
- TCGv_i32 one = tcg_constant_i32(float32_one);
185
- int op = float_muladd_negate_result | float_muladd_halve_result;
186
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
187
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
188
+ TCGv_i32 mone = tcg_constant_i32(-1);
189
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
190
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
191
}
192
193
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
194
{
195
- TCGv_i64 one = tcg_constant_i64(float64_one);
196
- int op = float_muladd_negate_result | float_muladd_halve_result;
197
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
198
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
199
+ TCGv_i32 mone = tcg_constant_i32(-1);
200
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
201
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
202
}
203
204
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
94
--
205
--
95
2.34.1
206
2.43.0
96
207
97
208
1
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
1
All uses have been converted to float*_muladd_scalbn.
2
2
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
---
5
include/tcg/tcg.h | 3 +++
6
include/fpu/softfloat.h | 3 ---
6
tcg/aarch64/tcg-target.h | 4 ----
7
fpu/softfloat.c | 6 ------
7
tcg/arm/tcg-target.h | 5 -----
8
fpu/softfloat-parts.c.inc | 4 ----
8
tcg/i386/tcg-target.h | 3 ---
9
3 files changed, 13 deletions(-)
9
tcg/loongarch64/tcg-target.h | 3 ---
10
tcg/mips/tcg-target.h | 5 -----
11
tcg/ppc/tcg-target.h | 4 ----
12
tcg/riscv/tcg-target.h | 4 ----
13
tcg/s390x/tcg-target.h | 4 ----
14
tcg/sparc64/tcg-target.h | 4 ----
15
tcg/tci/tcg-target.h | 4 ----
16
11 files changed, 3 insertions(+), 40 deletions(-)
17
10
18
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
19
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
20
--- a/include/tcg/tcg.h
13
--- a/include/fpu/softfloat.h
21
+++ b/include/tcg/tcg.h
14
+++ b/include/fpu/softfloat.h
22
@@ -XXX,XX +XXX,XX @@ void tcg_func_start(TCGContext *s);
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
23
16
| Using these differs from negating an input or output before calling
24
int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start);
17
| the muladd function in that this means that a NaN doesn't have its
25
18
| sign bit inverted before it is propagated.
26
+void tb_target_set_jmp_target(const TranslationBlock *, int,
19
-| We also support halving the result before rounding, as a special
27
+ uintptr_t, uintptr_t);
20
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
28
+
21
*----------------------------------------------------------------------------*/
29
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
22
enum {
30
23
float_muladd_negate_c = 1,
31
TCGTemp *tcg_global_mem_new_internal(TCGType, TCGv_ptr,
24
float_muladd_negate_product = 2,
32
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
25
float_muladd_negate_result = 4,
26
- float_muladd_halve_result = 8,
27
};
28
29
/*----------------------------------------------------------------------------
30
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
33
index XXXXXXX..XXXXXXX 100644
31
index XXXXXXX..XXXXXXX 100644
34
--- a/tcg/aarch64/tcg-target.h
32
--- a/fpu/softfloat.c
35
+++ b/tcg/aarch64/tcg-target.h
33
+++ b/fpu/softfloat.c
36
@@ -XXX,XX +XXX,XX @@ typedef enum {
34
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
37
35
if (unlikely(!can_use_fpu(s))) {
38
#define TCG_TARGET_DEFAULT_MO (0)
36
goto soft;
39
#define TCG_TARGET_HAS_MEMORY_BSWAP 0
37
}
40
-
38
- if (unlikely(flags & float_muladd_halve_result)) {
41
-void tb_target_set_jmp_target(const TranslationBlock *, int,
39
- goto soft;
42
- uintptr_t, uintptr_t);
40
- }
43
-
41
44
#define TCG_TARGET_NEED_LDST_LABELS
42
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
45
#define TCG_TARGET_NEED_POOL_LABELS
43
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
46
44
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
47
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
45
if (unlikely(!can_use_fpu(s))) {
46
goto soft;
47
}
48
- if (unlikely(flags & float_muladd_halve_result)) {
49
- goto soft;
50
- }
51
52
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
53
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
54
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
48
index XXXXXXX..XXXXXXX 100644
55
index XXXXXXX..XXXXXXX 100644
49
--- a/tcg/arm/tcg-target.h
56
--- a/fpu/softfloat-parts.c.inc
50
+++ b/tcg/arm/tcg-target.h
57
+++ b/fpu/softfloat-parts.c.inc
51
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
58
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
52
59
a->exp = p_widen.exp;
53
#define TCG_TARGET_DEFAULT_MO (0)
60
54
#define TCG_TARGET_HAS_MEMORY_BSWAP 0
61
return_normal:
55
-
62
- /* TODO: Replace all use of float_muladd_halve_result with scale. */
56
-/* not defined -- call should be eliminated at compile time */
63
- if (flags & float_muladd_halve_result) {
57
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
64
- a->exp -= 1;
58
- uintptr_t, uintptr_t);
65
- }
59
-
66
a->exp += scale;
60
#define TCG_TARGET_NEED_LDST_LABELS
67
finish_sign:
61
#define TCG_TARGET_NEED_POOL_LABELS
68
if (flags & float_muladd_negate_result) {
62
63
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
64
index XXXXXXX..XXXXXXX 100644
65
--- a/tcg/i386/tcg-target.h
66
+++ b/tcg/i386/tcg-target.h
67
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
68
#define TCG_TARGET_extract_i64_valid(ofs, len) \
69
(((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32)
70
71
-void tb_target_set_jmp_target(const TranslationBlock *, int,
72
- uintptr_t, uintptr_t);
73
-
74
/* This defines the natural memory order supported by this
75
* architecture before guarantees made by various barrier
76
* instructions.
77
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
78
index XXXXXXX..XXXXXXX 100644
79
--- a/tcg/loongarch64/tcg-target.h
80
+++ b/tcg/loongarch64/tcg-target.h
81
@@ -XXX,XX +XXX,XX @@ typedef enum {
82
#define TCG_TARGET_HAS_muluh_i64 1
83
#define TCG_TARGET_HAS_mulsh_i64 1
84
85
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
86
- uintptr_t, uintptr_t);
87
-
88
#define TCG_TARGET_DEFAULT_MO (0)
89
90
#define TCG_TARGET_NEED_LDST_LABELS
91
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
92
index XXXXXXX..XXXXXXX 100644
93
--- a/tcg/mips/tcg-target.h
94
+++ b/tcg/mips/tcg-target.h
95
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
96
#define TCG_TARGET_DEFAULT_MO (0)
97
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
98
99
-/* not defined -- call should be eliminated at compile time */
100
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
101
- uintptr_t, uintptr_t)
102
- QEMU_ERROR("code path is reachable");
103
-
104
#define TCG_TARGET_NEED_LDST_LABELS
105
106
#endif
107
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
108
index XXXXXXX..XXXXXXX 100644
109
--- a/tcg/ppc/tcg-target.h
110
+++ b/tcg/ppc/tcg-target.h
111
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;
112
#define TCG_TARGET_HAS_bitsel_vec have_vsx
113
#define TCG_TARGET_HAS_cmpsel_vec 0
114
115
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
116
- uintptr_t, uintptr_t);
117
-
118
#define TCG_TARGET_DEFAULT_MO (0)
119
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
120
-
121
#define TCG_TARGET_NEED_LDST_LABELS
122
#define TCG_TARGET_NEED_POOL_LABELS
123
124
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
125
index XXXXXXX..XXXXXXX 100644
126
--- a/tcg/riscv/tcg-target.h
127
+++ b/tcg/riscv/tcg-target.h
128
@@ -XXX,XX +XXX,XX @@ typedef enum {
129
#define TCG_TARGET_HAS_mulsh_i64 1
130
#endif
131
132
-/* not defined -- call should be eliminated at compile time */
133
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
134
- uintptr_t, uintptr_t);
135
-
136
#define TCG_TARGET_DEFAULT_MO (0)
137
138
#define TCG_TARGET_NEED_LDST_LABELS
139
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
140
index XXXXXXX..XXXXXXX 100644
141
--- a/tcg/s390x/tcg-target.h
142
+++ b/tcg/s390x/tcg-target.h
143
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
144
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
145
146
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
147
-
148
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
149
- uintptr_t jmp_rx, uintptr_t jmp_rw);
150
-
151
#define TCG_TARGET_NEED_LDST_LABELS
152
#define TCG_TARGET_NEED_POOL_LABELS
153
154
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
155
index XXXXXXX..XXXXXXX 100644
156
--- a/tcg/sparc64/tcg-target.h
157
+++ b/tcg/sparc64/tcg-target.h
158
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
159
160
#define TCG_TARGET_DEFAULT_MO (0)
161
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
162
-
163
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
164
- uintptr_t, uintptr_t);
165
-
166
#define TCG_TARGET_NEED_POOL_LABELS
167
168
#endif
169
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
170
index XXXXXXX..XXXXXXX 100644
171
--- a/tcg/tci/tcg-target.h
172
+++ b/tcg/tci/tcg-target.h
173
@@ -XXX,XX +XXX,XX @@ typedef enum {
174
175
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
176
177
-/* not defined -- call should be eliminated at compile time */
178
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
179
- uintptr_t, uintptr_t);
180
-
181
#endif /* TCG_TARGET_H */
182
--
69
--
183
2.34.1
70
2.43.0
184
71
185
72
New patch
1
This rounding mode is used by Hexagon.
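
A sketch of the intended behaviour (not an additional hunk; it assumes the usual set_float_rounding_mode() helper from fpu/softfloat-helpers.h):

    #include "qemu/osdep.h"
    #include "fpu/softfloat.h"
    #include "fpu/softfloat-helpers.h"

    /* With float_round_nearest_even_max, rounding is still nearest-even,
     * but an overflowing result saturates to the largest finite value
     * instead of becoming infinity. */
    static float32 mul_saturate_to_max(float32 a, float32 b, float_status *s)
    {
        set_float_rounding_mode(float_round_nearest_even_max, s);
        return float32_mul(a, b, s);
    }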
1
2
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
include/fpu/softfloat-types.h | 2 ++
6
fpu/softfloat-parts.c.inc | 3 +++
7
2 files changed, 5 insertions(+)
8
9
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
10
index XXXXXXX..XXXXXXX 100644
11
--- a/include/fpu/softfloat-types.h
12
+++ b/include/fpu/softfloat-types.h
13
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
14
float_round_to_odd = 5,
15
/* Not an IEEE rounding mode: round to closest odd, overflow to inf */
16
float_round_to_odd_inf = 6,
17
+ /* Not an IEEE rounding mode: round to nearest even, overflow to max */
18
+ float_round_nearest_even_max = 7,
19
} FloatRoundMode;
20
21
/*
22
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
23
index XXXXXXX..XXXXXXX 100644
24
--- a/fpu/softfloat-parts.c.inc
25
+++ b/fpu/softfloat-parts.c.inc
26
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
27
int exp, flags = 0;
28
29
switch (s->float_rounding_mode) {
30
+ case float_round_nearest_even_max:
31
+ overflow_norm = true;
32
+ /* fall through */
33
case float_round_nearest_even:
34
if (N > 64 && frac_lsb == 0) {
35
inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
36
--
37
2.43.0
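
A minimal sketch of how the new mode behaves, assuming only the existing
softfloat helpers (the saturating-multiply wrapper itself is hypothetical):
an operation that would overflow rounds to the largest finite value
instead of returning infinity.

    #include "qemu/osdep.h"
    #include "fpu/softfloat.h"

    /* Multiply with overflow saturating to the maximum finite value. */
    static float32 float32_mul_sat(float32 a, float32 b, float_status *fs)
    {
        FloatRoundMode old = get_float_rounding_mode(fs);
        float32 r;

        set_float_rounding_mode(float_round_nearest_even_max, fs);
        r = float32_mul(a, b, fs);   /* overflow -> max finite, not inf */
        set_float_rounding_mode(old, fs);
        return r;
    }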
New patch
1
Certain Hexagon instructions suppress changes to the result
2
when the product of fma() is a true zero.
1
3
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/fpu/softfloat.h | 5 +++++
7
fpu/softfloat.c | 3 +++
8
fpu/softfloat-parts.c.inc | 4 +++-
9
3 files changed, 11 insertions(+), 1 deletion(-)
10
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/include/fpu/softfloat.h
14
+++ b/include/fpu/softfloat.h
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
16
| Using these differs from negating an input or output before calling
17
| the muladd function in that this means that a NaN doesn't have its
18
| sign bit inverted before it is propagated.
19
+|
20
+| With float_muladd_suppress_add_product_zero, if A or B is zero
21
+| such that the product is a true zero, then return C without addition.
22
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
23
*----------------------------------------------------------------------------*/
24
enum {
25
float_muladd_negate_c = 1,
26
float_muladd_negate_product = 2,
27
float_muladd_negate_result = 4,
28
+ float_muladd_suppress_add_product_zero = 8,
29
};
30
31
/*----------------------------------------------------------------------------
32
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/fpu/softfloat.c
35
+++ b/fpu/softfloat.c
36
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
37
if (unlikely(!can_use_fpu(s))) {
38
goto soft;
39
}
40
+ if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
41
+ goto soft;
42
+ }
43
44
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
45
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
46
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
47
index XXXXXXX..XXXXXXX 100644
48
--- a/fpu/softfloat-parts.c.inc
49
+++ b/fpu/softfloat-parts.c.inc
50
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
51
goto return_normal;
52
}
53
if (c->cls == float_class_zero) {
54
- if (a->sign != c->sign) {
55
+ if (flags & float_muladd_suppress_add_product_zero) {
56
+ a->sign = c->sign;
57
+ } else if (a->sign != c->sign) {
58
goto return_sub_zero;
59
}
60
goto return_zero;
61
--
62
2.43.0
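
A sketch of the new flag's effect, assuming the softfloat API added above
(the wrapper name is made up): when the product is a true zero, the addend
is returned unchanged, so a -0.0 addend keeps its sign instead of being
folded to +0.0 by the usual 0 + -0 rule.

    #include "qemu/osdep.h"
    #include "fpu/softfloat.h"

    /* Returns c itself (-0.0) rather than +0.0. */
    static float32 zero_product_keeps_addend(float_status *fs)
    {
        float32 a = float32_zero;                       /* +0.0 */
        float32 c = float32_set_sign(float32_zero, 1);  /* -0.0 */

        return float32_muladd(a, float32_one, c,
                              float_muladd_suppress_add_product_zero, fs);
    }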
1
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
1
There are no special cases for this instruction.
2
Remove internal_mpyf as unused.
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/sparc64/tcg-target.c.inc | 41 +++++++++++-------------------------
7
target/hexagon/fma_emu.h | 1 -
5
1 file changed, 12 insertions(+), 29 deletions(-)
8
target/hexagon/fma_emu.c | 8 --------
9
target/hexagon/op_helper.c | 2 +-
10
3 files changed, 1 insertion(+), 10 deletions(-)
6
11
7
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
12
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
8
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/sparc64/tcg-target.c.inc
14
--- a/target/hexagon/fma_emu.h
10
+++ b/tcg/sparc64/tcg-target.c.inc
15
+++ b/target/hexagon/fma_emu.h
11
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
16
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32);
12
return false;
17
float32 infinite_float32(uint8_t sign);
18
float32 internal_fmafx(float32 a, float32 b, float32 c,
19
int scale, float_status *fp_status);
20
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
21
float64 internal_mpyhh(float64 a, float64 b,
22
unsigned long long int accumulated,
23
float_status *fp_status);
24
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/hexagon/fma_emu.c
27
+++ b/target/hexagon/fma_emu.c
28
@@ -XXX,XX +XXX,XX @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
29
return accum_round_float32(result, fp_status);
13
}
30
}
14
31
15
-static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, const void *arg)
32
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
16
-{
33
-{
17
- intptr_t diff = tcg_tbrel_diff(s, arg);
34
- if (float32_is_zero(a) || float32_is_zero(b)) {
18
- if (USE_REG_TB && check_fit_ptr(diff, 13)) {
35
- return float32_mul(a, b, fp_status);
19
- tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
20
- return;
21
- }
36
- }
22
- tcg_out_movi(s, TCG_TYPE_PTR, ret, (uintptr_t)arg & ~0x3ff);
37
- return internal_fmafx(a, b, float32_zero, 0, fp_status);
23
- tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, (uintptr_t)arg & 0x3ff);
24
-}
38
-}
25
-
39
-
26
static void tcg_out_sety(TCGContext *s, TCGReg rs)
40
float64 internal_mpyhh(float64 a, float64 b,
41
unsigned long long int accumulated,
42
float_status *fp_status)
43
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/hexagon/op_helper.c
46
+++ b/target/hexagon/op_helper.c
47
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
27
{
48
{
28
tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
49
float32 RdV;
29
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
50
arch_fpop_start(env);
30
51
- RdV = internal_mpyf(RsV, RtV, &env->fp_status);
31
switch (opc) {
52
+ RdV = float32_mul(RsV, RtV, &env->fp_status);
32
case INDEX_op_goto_tb:
53
arch_fpop_end(env);
33
- if (s->tb_jmp_insn_offset) {
54
return RdV;
34
- /* direct jump method */
55
}
35
- if (USE_REG_TB) {
36
- /* make sure the patch is 8-byte aligned. */
37
- if ((intptr_t)s->code_ptr & 4) {
38
- tcg_out_nop(s);
39
- }
40
- s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
41
- tcg_out_sethi(s, TCG_REG_T1, 0);
42
- tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
43
- tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
44
- tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
45
- } else {
46
- s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
47
- tcg_out32(s, CALL);
48
+ qemu_build_assert(TCG_TARGET_HAS_direct_jump);
49
+ /* Direct jump. */
50
+ if (USE_REG_TB) {
51
+ /* make sure the patch is 8-byte aligned. */
52
+ if ((intptr_t)s->code_ptr & 4) {
53
tcg_out_nop(s);
54
}
55
+ s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
56
+ tcg_out_sethi(s, TCG_REG_T1, 0);
57
+ tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
58
+ tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
59
+ tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
60
} else {
61
- /* indirect jump method */
62
- tcg_out_ld_ptr(s, TCG_REG_TB, s->tb_jmp_target_addr + a0);
63
- tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
64
+ s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
65
+ tcg_out32(s, CALL);
66
tcg_out_nop(s);
67
}
68
set_jmp_reset_offset(s, a0);
69
--
56
--
70
2.34.1
57
2.43.0
71
72
New patch
1
There are no special cases for this instruction.
1
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/hexagon/op_helper.c | 2 +-
7
1 file changed, 1 insertion(+), 1 deletion(-)
8
9
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/hexagon/op_helper.c
12
+++ b/target/hexagon/op_helper.c
13
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
14
float32 RsV, float32 RtV)
15
{
16
arch_fpop_start(env);
17
- RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
18
+ RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
19
arch_fpop_end(env);
20
return RxV;
21
}
22
--
23
2.43.0
New patch
1
There are no special cases for this instruction. Since Hexagon
2
always uses default-nan mode, explicitly negating the first
3
input is unnecessary. Use float_muladd_negate_product instead.
1
4
5
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/hexagon/op_helper.c | 5 ++---
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
11
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/hexagon/op_helper.c
14
+++ b/target/hexagon/op_helper.c
15
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
16
float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
17
float32 RsV, float32 RtV)
18
{
19
- float32 neg_RsV;
20
arch_fpop_start(env);
21
- neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
22
- RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
23
+ RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
24
+ &env->fp_status);
25
arch_fpop_end(env);
26
return RxV;
27
}
28
--
29
2.43.0
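
For reference, a hypothetical wrapper showing what the flag does:
float_muladd_negate_product tells the fused operation to flip the sign of
the product, so c - a * b is computed in a single rounding step without
modifying either input beforehand.

    #include "qemu/osdep.h"
    #include "fpu/softfloat.h"

    /* Fused c - a * b; the wrapper name is invented. */
    static float32 muladd_neg_product(float32 a, float32 b, float32 c,
                                      float_status *fs)
    {
        return float32_muladd(a, b, c, float_muladd_negate_product, fs);
    }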
New patch
1
This instruction has a special case that 0 * x + c returns c
2
without the normal sign folding that comes with 0 + -0.
3
Use the new float_muladd_suppress_add_product_zero to
4
describe this.
1
5
6
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
target/hexagon/op_helper.c | 11 +++--------
10
1 file changed, 3 insertions(+), 8 deletions(-)
11
12
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/hexagon/op_helper.c
15
+++ b/target/hexagon/op_helper.c
16
@@ -XXX,XX +XXX,XX @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
17
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
18
float32 RsV, float32 RtV, float32 PuV)
19
{
20
- size4s_t tmp;
21
arch_fpop_start(env);
22
- RxV = check_nan(RxV, RxV, &env->fp_status);
23
- RxV = check_nan(RxV, RsV, &env->fp_status);
24
- RxV = check_nan(RxV, RtV, &env->fp_status);
25
- tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
26
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
27
- RxV = tmp;
28
- }
29
+ RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
30
+ float_muladd_suppress_add_product_zero,
31
+ &env->fp_status);
32
arch_fpop_end(env);
33
return RxV;
34
}
35
--
36
2.43.0
1
Stop overloading jmp_target_arg for both offset and address,
1
There are multiple special cases for this instruction.
2
depending on TCG_TARGET_HAS_direct_jump. Instead, add a new
2
(1) Saturating to normal maximum instead of overflowing to infinity is
3
field to hold the jump insn offset and always set the target
3
handled by the new float_round_nearest_even_max rounding mode.
4
address in jmp_target_addr[]. This will allow a tcg backend
4
(2) The 0 * n + c special case is handled by the new
5
to use either direct or indirect depending on displacement.
5
float_muladd_suppress_add_product_zero flag.
6
(3) The Inf - Inf -> 0 special case can be detected after the fact
7
by examining float_flag_invalid_isi.
6
8
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
11
---
10
include/exec/exec-all.h | 3 ++-
12
target/hexagon/op_helper.c | 105 +++++++++----------------------------
11
accel/tcg/cpu-exec.c | 5 ++---
13
1 file changed, 26 insertions(+), 79 deletions(-)
12
tcg/tcg.c | 6 ++++--
13
3 files changed, 8 insertions(+), 6 deletions(-)
14
14
15
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
15
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
16
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
17
--- a/include/exec/exec-all.h
17
--- a/target/hexagon/op_helper.c
18
+++ b/include/exec/exec-all.h
18
+++ b/target/hexagon/op_helper.c
19
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
19
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
20
*/
20
return RxV;
21
#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
21
}
22
uint16_t jmp_reset_offset[2]; /* offset of original jump target */
22
23
- uintptr_t jmp_target_arg[2]; /* target address or offset */
23
-static bool is_zero_prod(float32 a, float32 b)
24
+ uint16_t jmp_insn_offset[2]; /* offset of direct jump insn */
24
-{
25
+ uintptr_t jmp_target_addr[2]; /* target address */
25
- return ((float32_is_zero(a) && is_finite(b)) ||
26
26
- (float32_is_zero(b) && is_finite(a)));
27
/*
27
-}
28
* Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
28
-
29
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
29
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
30
index XXXXXXX..XXXXXXX 100644
30
-{
31
--- a/accel/tcg/cpu-exec.c
31
- float32 ret = dst;
32
+++ b/accel/tcg/cpu-exec.c
32
- if (float32_is_any_nan(x)) {
33
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
33
- if (extract32(x, 22, 1) == 0) {
34
34
- float_raise(float_flag_invalid, fp_status);
35
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
35
- }
36
- ret = make_float32(0xffffffff); /* nan */
37
- }
38
- return ret;
39
-}
40
-
41
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
42
float32 RsV, float32 RtV, float32 PuV)
36
{
43
{
37
+ tb->jmp_target_addr[n] = addr;
44
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
38
if (TCG_TARGET_HAS_direct_jump) {
45
return RxV;
39
- uintptr_t offset = tb->jmp_target_arg[n];
40
+ uintptr_t offset = tb->jmp_insn_offset[n];
41
uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
42
uintptr_t jmp_rx = tc_ptr + offset;
43
uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
44
tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr);
45
- } else {
46
- tb->jmp_target_arg[n] = addr;
47
}
48
}
46
}
49
47
50
diff --git a/tcg/tcg.c b/tcg/tcg.c
48
-static bool is_inf_prod(int32_t a, int32_t b)
51
index XXXXXXX..XXXXXXX 100644
49
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
52
--- a/tcg/tcg.c
50
+ float32 RsV, float32 RtV, int negate)
53
+++ b/tcg/tcg.c
51
{
54
@@ -XXX,XX +XXX,XX @@ static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
52
- return (float32_is_infinity(a) && float32_is_infinity(b)) ||
55
* tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
53
- (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
56
*/
54
- (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
57
tcg_debug_assert(TCG_TARGET_HAS_direct_jump);
55
+ int flags;
58
- s->gen_tb->jmp_target_arg[which] = tcg_current_code_size(s);
56
+
59
+ s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
57
+ arch_fpop_start(env);
58
+
59
+ set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
60
+ RxV = float32_muladd(RsV, RtV, RxV,
61
+ negate | float_muladd_suppress_add_product_zero,
62
+ &env->fp_status);
63
+
64
+ flags = get_float_exception_flags(&env->fp_status);
65
+ if (flags) {
66
+ /* Flags are suppressed by this instruction. */
67
+ set_float_exception_flags(0, &env->fp_status);
68
+
69
+ /* Return 0 for Inf - Inf. */
70
+ if (flags & float_flag_invalid_isi) {
71
+ RxV = 0;
72
+ }
73
+ }
74
+
75
+ arch_fpop_end(env);
76
+ return RxV;
60
}
77
}
61
78
62
static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
79
float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
63
@@ -XXX,XX +XXX,XX @@ static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
80
float32 RsV, float32 RtV)
64
* Return the read-execute version of the pointer, for the benefit
81
{
65
* of any pc-relative addressing mode.
82
- bool infinp;
66
*/
83
- bool infminusinf;
67
- return (uintptr_t)tcg_splitwx_to_rx(s->gen_tb->jmp_target_arg + which);
84
- float32 tmp;
68
+ return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
85
-
86
- arch_fpop_start(env);
87
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
88
- infminusinf = float32_is_infinity(RxV) &&
89
- is_inf_prod(RsV, RtV) &&
90
- (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
91
- infinp = float32_is_infinity(RxV) ||
92
- float32_is_infinity(RtV) ||
93
- float32_is_infinity(RsV);
94
- RxV = check_nan(RxV, RxV, &env->fp_status);
95
- RxV = check_nan(RxV, RsV, &env->fp_status);
96
- RxV = check_nan(RxV, RtV, &env->fp_status);
97
- tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
98
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
99
- RxV = tmp;
100
- }
101
- set_float_exception_flags(0, &env->fp_status);
102
- if (float32_is_infinity(RxV) && !infinp) {
103
- RxV = RxV - 1;
104
- }
105
- if (infminusinf) {
106
- RxV = 0;
107
- }
108
- arch_fpop_end(env);
109
- return RxV;
110
+ return do_sffma_lib(env, RxV, RsV, RtV, 0);
69
}
111
}
70
112
71
/* Signal overflow, starting over with fewer guest insns. */
113
float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
72
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
114
float32 RsV, float32 RtV)
73
/* Initialize goto_tb jump offsets. */
115
{
74
tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
116
- bool infinp;
75
tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
117
- bool infminusinf;
76
+ tb->jmp_insn_offset[0] = TB_JMP_OFFSET_INVALID;
118
- float32 tmp;
77
+ tb->jmp_insn_offset[1] = TB_JMP_OFFSET_INVALID;
119
-
78
120
- arch_fpop_start(env);
79
tcg_reg_alloc_start(s);
121
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
80
122
- infminusinf = float32_is_infinity(RxV) &&
123
- is_inf_prod(RsV, RtV) &&
124
- (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
125
- infinp = float32_is_infinity(RxV) ||
126
- float32_is_infinity(RtV) ||
127
- float32_is_infinity(RsV);
128
- RxV = check_nan(RxV, RxV, &env->fp_status);
129
- RxV = check_nan(RxV, RsV, &env->fp_status);
130
- RxV = check_nan(RxV, RtV, &env->fp_status);
131
- float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
132
- tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
133
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
134
- RxV = tmp;
135
- }
136
- set_float_exception_flags(0, &env->fp_status);
137
- if (float32_is_infinity(RxV) && !infinp) {
138
- RxV = RxV - 1;
139
- }
140
- if (infminusinf) {
141
- RxV = 0;
142
- }
143
- arch_fpop_end(env);
144
- return RxV;
145
+ return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
146
}
147
148
float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
81
--
149
--
82
2.34.1
150
2.43.0
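
The detect-after-the-fact idea from case (3), shown in isolation as a
hypothetical helper (flag handling simplified; only the softfloat calls
are real API): run the fused operation, then test the invalid flag raised
specifically for infinity subtraction.

    #include "qemu/osdep.h"
    #include "fpu/softfloat.h"

    static float32 muladd_inf_minus_inf_as_zero(float32 a, float32 b,
                                                float32 c, float_status *fs)
    {
        float32 r = float32_muladd(a, b, c, 0, fs);

        if (get_float_exception_flags(fs) & float_flag_invalid_isi) {
            r = float32_zero;   /* Inf - Inf defined as 0 for this insn */
        }
        return r;
    }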
83
84
New patch
1
The function is now unused.
1
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/hexagon/fma_emu.h | 2 -
7
target/hexagon/fma_emu.c | 171 ---------------------------------------
8
2 files changed, 173 deletions(-)
9
10
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/hexagon/fma_emu.h
13
+++ b/target/hexagon/fma_emu.h
14
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float32_getexp_raw(float32 f32)
15
}
16
int32_t float32_getexp(float32 f32);
17
float32 infinite_float32(uint8_t sign);
18
-float32 internal_fmafx(float32 a, float32 b, float32 c,
19
- int scale, float_status *fp_status);
20
float64 internal_mpyhh(float64 a, float64 b,
21
unsigned long long int accumulated,
22
float_status *fp_status);
23
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
24
index XXXXXXX..XXXXXXX 100644
25
--- a/target/hexagon/fma_emu.c
26
+++ b/target/hexagon/fma_emu.c
27
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
28
return -1;
29
}
30
31
-static uint64_t float32_getmant(float32 f32)
32
-{
33
- Float a = { .i = f32 };
34
- if (float32_is_normal(f32)) {
35
- return a.mant | 1ULL << 23;
36
- }
37
- if (float32_is_zero(f32)) {
38
- return 0;
39
- }
40
- if (float32_is_denormal(f32)) {
41
- return a.mant;
42
- }
43
- return ~0ULL;
44
-}
45
-
46
int32_t float32_getexp(float32 f32)
47
{
48
Float a = { .i = f32 };
49
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
50
}
51
52
/* Return a maximum finite value with the requested sign */
53
-static float32 maxfinite_float32(uint8_t sign)
54
-{
55
- if (sign) {
56
- return make_float32(SF_MINUS_MAXF);
57
- } else {
58
- return make_float32(SF_MAXF);
59
- }
60
-}
61
-
62
-/* Return a zero value with requested sign */
63
-static float32 zero_float32(uint8_t sign)
64
-{
65
- if (sign) {
66
- return make_float32(0x80000000);
67
- } else {
68
- return float32_zero;
69
- }
70
-}
71
-
72
#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
73
static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
74
{ \
75
@@ -XXX,XX +XXX,XX @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
76
}
77
78
GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
79
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
80
-
81
-static bool is_inf_prod(float64 a, float64 b)
82
-{
83
- return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
84
- (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
85
- (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
86
-}
87
-
88
-static float64 special_fma(float64 a, float64 b, float64 c,
89
- float_status *fp_status)
90
-{
91
- float64 ret = make_float64(0);
92
-
93
- /*
94
- * If A multiplied by B is an exact infinity and C is also an infinity
95
- * but with the opposite sign, FMA returns NaN and raises invalid.
96
- */
97
- uint8_t a_sign = float64_is_neg(a);
98
- uint8_t b_sign = float64_is_neg(b);
99
- uint8_t c_sign = float64_is_neg(c);
100
- if (is_inf_prod(a, b) && float64_is_infinity(c)) {
101
- if ((a_sign ^ b_sign) != c_sign) {
102
- ret = make_float64(DF_NAN);
103
- float_raise(float_flag_invalid, fp_status);
104
- return ret;
105
- }
106
- }
107
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
108
- (float64_is_zero(a) && float64_is_infinity(b))) {
109
- ret = make_float64(DF_NAN);
110
- float_raise(float_flag_invalid, fp_status);
111
- return ret;
112
- }
113
- /*
114
- * If none of the above checks are true and C is a NaN,
115
- * a NaN shall be returned
116
- * If A or B are NaN, a NAN shall be returned.
117
- */
118
- if (float64_is_any_nan(a) ||
119
- float64_is_any_nan(b) ||
120
- float64_is_any_nan(c)) {
121
- if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
122
- float_raise(float_flag_invalid, fp_status);
123
- }
124
- if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
125
- float_raise(float_flag_invalid, fp_status);
126
- }
127
- if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
128
- float_raise(float_flag_invalid, fp_status);
129
- }
130
- ret = make_float64(DF_NAN);
131
- return ret;
132
- }
133
- /*
134
- * We have checked for adding opposite-signed infinities.
135
- * Other infinities return infinity with the correct sign
136
- */
137
- if (float64_is_infinity(c)) {
138
- ret = infinite_float64(c_sign);
139
- return ret;
140
- }
141
- if (float64_is_infinity(a) || float64_is_infinity(b)) {
142
- ret = infinite_float64(a_sign ^ b_sign);
143
- return ret;
144
- }
145
- g_assert_not_reached();
146
-}
147
-
148
-static float32 special_fmaf(float32 a, float32 b, float32 c,
149
- float_status *fp_status)
150
-{
151
- float64 aa, bb, cc;
152
- aa = float32_to_float64(a, fp_status);
153
- bb = float32_to_float64(b, fp_status);
154
- cc = float32_to_float64(c, fp_status);
155
- return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
156
-}
157
-
158
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
159
- float_status *fp_status)
160
-{
161
- Accum prod;
162
- Accum acc;
163
- Accum result;
164
- accum_init(&prod);
165
- accum_init(&acc);
166
- accum_init(&result);
167
-
168
- uint8_t a_sign = float32_is_neg(a);
169
- uint8_t b_sign = float32_is_neg(b);
170
- uint8_t c_sign = float32_is_neg(c);
171
- if (float32_is_infinity(a) ||
172
- float32_is_infinity(b) ||
173
- float32_is_infinity(c)) {
174
- return special_fmaf(a, b, c, fp_status);
175
- }
176
- if (float32_is_any_nan(a) ||
177
- float32_is_any_nan(b) ||
178
- float32_is_any_nan(c)) {
179
- return special_fmaf(a, b, c, fp_status);
180
- }
181
- if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
182
- float32 tmp = float32_mul(a, b, fp_status);
183
- tmp = float32_add(tmp, c, fp_status);
184
- return tmp;
185
- }
186
-
187
- /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
188
- prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
189
-
190
- /*
191
- * Note: extracting the mantissa into an int is multiplying by
192
- * 2**23, so adjust here
193
- */
194
- prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
195
- prod.sign = a_sign ^ b_sign;
196
- if (float32_is_zero(a) || float32_is_zero(b)) {
197
- prod.exp = -2 * WAY_BIG_EXP;
198
- }
199
- if ((scale > 0) && float32_is_denormal(c)) {
200
- acc.mant = int128_mul_6464(0, 0);
201
- acc.exp = -WAY_BIG_EXP;
202
- acc.sign = c_sign;
203
- acc.sticky = 1;
204
- result = accum_add(prod, acc);
205
- } else if (!float32_is_zero(c)) {
206
- acc.mant = int128_mul_6464(float32_getmant(c), 1);
207
- acc.exp = float32_getexp(c);
208
- acc.sign = c_sign;
209
- result = accum_add(prod, acc);
210
- } else {
211
- result = prod;
212
- }
213
- result.exp += scale;
214
- return accum_round_float32(result, fp_status);
215
-}
216
217
float64 internal_mpyhh(float64 a, float64 b,
218
unsigned long long int accumulated,
219
--
220
2.43.0
1
We now have the option to generate direct or indirect
1
This massive macro is now only used once.
2
goto_tb depending on the dynamic displacement, thus
2
Expand it for use only by float64.
3
the define is no longer necessary or completely accurate.
4
3
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
6
---
8
tcg/aarch64/tcg-target.h | 1 -
7
target/hexagon/fma_emu.c | 255 +++++++++++++++++++--------------------
9
tcg/arm/tcg-target.h | 1 -
8
1 file changed, 127 insertions(+), 128 deletions(-)
10
tcg/i386/tcg-target.h | 1 -
11
tcg/loongarch64/tcg-target.h | 1 -
12
tcg/mips/tcg-target.h | 1 -
13
tcg/ppc/tcg-target.h | 1 -
14
tcg/riscv/tcg-target.h | 1 -
15
tcg/s390x/tcg-target.h | 1 -
16
tcg/sparc64/tcg-target.h | 1 -
17
tcg/tci/tcg-target.h | 1 -
18
accel/tcg/cpu-exec.c | 23 +++++++++++------------
19
tcg/tcg.c | 1 -
20
tcg/arm/tcg-target.c.inc | 1 -
21
tcg/mips/tcg-target.c.inc | 1 -
22
tcg/riscv/tcg-target.c.inc | 1 -
23
tcg/s390x/tcg-target.c.inc | 3 +++
24
tcg/tci/tcg-target.c.inc | 1 -
25
17 files changed, 14 insertions(+), 27 deletions(-)
26
9
27
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
28
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
29
--- a/tcg/aarch64/tcg-target.h
12
--- a/target/hexagon/fma_emu.c
30
+++ b/tcg/aarch64/tcg-target.h
13
+++ b/target/hexagon/fma_emu.c
31
@@ -XXX,XX +XXX,XX @@ typedef enum {
14
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
32
#define TCG_TARGET_HAS_muls2_i64 0
33
#define TCG_TARGET_HAS_muluh_i64 1
34
#define TCG_TARGET_HAS_mulsh_i64 1
35
-#define TCG_TARGET_HAS_direct_jump 1
36
37
#define TCG_TARGET_HAS_v64 1
38
#define TCG_TARGET_HAS_v128 1
39
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
40
index XXXXXXX..XXXXXXX 100644
41
--- a/tcg/arm/tcg-target.h
42
+++ b/tcg/arm/tcg-target.h
43
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
44
#define TCG_TARGET_HAS_mulsh_i32 0
45
#define TCG_TARGET_HAS_div_i32 use_idiv_instructions
46
#define TCG_TARGET_HAS_rem_i32 0
47
-#define TCG_TARGET_HAS_direct_jump 0
48
#define TCG_TARGET_HAS_qemu_st8_i32 0
49
50
#define TCG_TARGET_HAS_v64 use_neon_instructions
51
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
52
index XXXXXXX..XXXXXXX 100644
53
--- a/tcg/i386/tcg-target.h
54
+++ b/tcg/i386/tcg-target.h
55
@@ -XXX,XX +XXX,XX @@ extern bool have_movbe;
56
#define TCG_TARGET_HAS_muls2_i32 1
57
#define TCG_TARGET_HAS_muluh_i32 0
58
#define TCG_TARGET_HAS_mulsh_i32 0
59
-#define TCG_TARGET_HAS_direct_jump 1
60
61
#if TCG_TARGET_REG_BITS == 64
62
/* Keep target addresses zero-extended in a register. */
63
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
64
index XXXXXXX..XXXXXXX 100644
65
--- a/tcg/loongarch64/tcg-target.h
66
+++ b/tcg/loongarch64/tcg-target.h
67
@@ -XXX,XX +XXX,XX @@ typedef enum {
68
#define TCG_TARGET_HAS_clz_i32 1
69
#define TCG_TARGET_HAS_ctz_i32 1
70
#define TCG_TARGET_HAS_ctpop_i32 0
71
-#define TCG_TARGET_HAS_direct_jump 1
72
#define TCG_TARGET_HAS_brcond2 0
73
#define TCG_TARGET_HAS_setcond2 0
74
#define TCG_TARGET_HAS_qemu_st8_i32 0
75
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
76
index XXXXXXX..XXXXXXX 100644
77
--- a/tcg/mips/tcg-target.h
78
+++ b/tcg/mips/tcg-target.h
79
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
80
#define TCG_TARGET_HAS_muluh_i32 1
81
#define TCG_TARGET_HAS_mulsh_i32 1
82
#define TCG_TARGET_HAS_bswap32_i32 1
83
-#define TCG_TARGET_HAS_direct_jump 0
84
85
#if TCG_TARGET_REG_BITS == 64
86
#define TCG_TARGET_HAS_add2_i32 0
87
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
88
index XXXXXXX..XXXXXXX 100644
89
--- a/tcg/ppc/tcg-target.h
90
+++ b/tcg/ppc/tcg-target.h
91
@@ -XXX,XX +XXX,XX @@ extern bool have_vsx;
92
#define TCG_TARGET_HAS_muls2_i32 0
93
#define TCG_TARGET_HAS_muluh_i32 1
94
#define TCG_TARGET_HAS_mulsh_i32 1
95
-#define TCG_TARGET_HAS_direct_jump 1
96
#define TCG_TARGET_HAS_qemu_st8_i32 0
97
98
#if TCG_TARGET_REG_BITS == 64
99
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
100
index XXXXXXX..XXXXXXX 100644
101
--- a/tcg/riscv/tcg-target.h
102
+++ b/tcg/riscv/tcg-target.h
103
@@ -XXX,XX +XXX,XX @@ typedef enum {
104
#define TCG_TARGET_HAS_clz_i32 0
105
#define TCG_TARGET_HAS_ctz_i32 0
106
#define TCG_TARGET_HAS_ctpop_i32 0
107
-#define TCG_TARGET_HAS_direct_jump 0
108
#define TCG_TARGET_HAS_brcond2 1
109
#define TCG_TARGET_HAS_setcond2 1
110
#define TCG_TARGET_HAS_qemu_st8_i32 0
111
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
112
index XXXXXXX..XXXXXXX 100644
113
--- a/tcg/s390x/tcg-target.h
114
+++ b/tcg/s390x/tcg-target.h
115
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
116
#define TCG_TARGET_HAS_mulsh_i32 0
117
#define TCG_TARGET_HAS_extrl_i64_i32 0
118
#define TCG_TARGET_HAS_extrh_i64_i32 0
119
-#define TCG_TARGET_HAS_direct_jump 1
120
#define TCG_TARGET_HAS_qemu_st8_i32 0
121
122
#define TCG_TARGET_HAS_div2_i64 1
123
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
124
index XXXXXXX..XXXXXXX 100644
125
--- a/tcg/sparc64/tcg-target.h
126
+++ b/tcg/sparc64/tcg-target.h
127
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
128
#define TCG_TARGET_HAS_muls2_i32 1
129
#define TCG_TARGET_HAS_muluh_i32 0
130
#define TCG_TARGET_HAS_mulsh_i32 0
131
-#define TCG_TARGET_HAS_direct_jump 1
132
#define TCG_TARGET_HAS_qemu_st8_i32 0
133
134
#define TCG_TARGET_HAS_extrl_i64_i32 1
135
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
136
index XXXXXXX..XXXXXXX 100644
137
--- a/tcg/tci/tcg-target.h
138
+++ b/tcg/tci/tcg-target.h
139
@@ -XXX,XX +XXX,XX @@
140
#define TCG_TARGET_HAS_muls2_i32 1
141
#define TCG_TARGET_HAS_muluh_i32 0
142
#define TCG_TARGET_HAS_mulsh_i32 0
143
-#define TCG_TARGET_HAS_direct_jump 0
144
#define TCG_TARGET_HAS_qemu_st8_i32 0
145
146
#if TCG_TARGET_REG_BITS == 64
147
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
148
index XXXXXXX..XXXXXXX 100644
149
--- a/accel/tcg/cpu-exec.c
150
+++ b/accel/tcg/cpu-exec.c
151
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
152
153
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
154
{
155
+ /*
156
+ * Get the rx view of the structure, from which we find the
157
+ * executable code address, and tb_target_set_jmp_target can
158
+ * produce a pc-relative displacement to jmp_target_addr[n].
159
+ */
160
+ const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
161
+ uintptr_t offset = tb->jmp_insn_offset[n];
162
+ uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
163
+ uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
164
+
165
tb->jmp_target_addr[n] = addr;
166
- if (TCG_TARGET_HAS_direct_jump) {
167
- /*
168
- * Get the rx view of the structure, from which we find the
169
- * executable code address, and tb_target_set_jmp_target can
170
- * produce a pc-relative displacement to jmp_target_addr[n].
171
- */
172
- const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
173
- uintptr_t offset = tb->jmp_insn_offset[n];
174
- uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
175
- uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
176
- tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
177
- }
178
+ tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
179
}
15
}
180
16
181
static inline void tb_add_jump(TranslationBlock *tb, int n,
17
/* Return a maximum finite value with the requested sign */
182
diff --git a/tcg/tcg.c b/tcg/tcg.c
18
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
183
index XXXXXXX..XXXXXXX 100644
19
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
184
--- a/tcg/tcg.c
20
-{ \
185
+++ b/tcg/tcg.c
21
- if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
186
@@ -XXX,XX +XXX,XX @@ static void G_GNUC_UNUSED set_jmp_insn_offset(TCGContext *s, int which)
22
- && ((a.guard | a.round | a.sticky) == 0)) { \
187
* We will check for overflow at the end of the opcode loop in
23
- /* result zero */ \
188
* tcg_gen_code, where we bound tcg_current_code_size to UINT16_MAX.
24
- switch (fp_status->float_rounding_mode) { \
189
*/
25
- case float_round_down: \
190
- tcg_debug_assert(TCG_TARGET_HAS_direct_jump);
26
- return zero_##SUFFIX(1); \
191
s->gen_tb->jmp_insn_offset[which] = tcg_current_code_size(s);
27
- default: \
28
- return zero_##SUFFIX(0); \
29
- } \
30
- } \
31
- /* Normalize right */ \
32
- /* We want MANTBITS bits of mantissa plus the leading one. */ \
33
- /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
34
- /* So we need to normalize right while the high word is non-zero and \
35
- * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
36
- while ((int128_gethi(a.mant) != 0) || \
37
- ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
38
- a = accum_norm_right(a, 1); \
39
- } \
40
- /* \
41
- * OK, now normalize left \
42
- * We want to normalize left until we have a leading one in bit 24 \
43
- * Theoretically, we only need to shift a maximum of one to the left if we \
44
- * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
45
- * should be 0 \
46
- */ \
47
- while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
48
- a = accum_norm_left(a); \
49
- } \
50
- /* \
51
- * OK, now we might need to denormalize because of potential underflow. \
52
- * We need to do this before rounding, and rounding might make us normal \
53
- * again \
54
- */ \
55
- while (a.exp <= 0) { \
56
- a = accum_norm_right(a, 1 - a.exp); \
57
- /* \
58
- * Do we have underflow? \
59
- * That's when we get an inexact answer because we ran out of bits \
60
- * in a denormal. \
61
- */ \
62
- if (a.guard || a.round || a.sticky) { \
63
- float_raise(float_flag_underflow, fp_status); \
64
- } \
65
- } \
66
- /* OK, we're relatively canonical... now we need to round */ \
67
- if (a.guard || a.round || a.sticky) { \
68
- float_raise(float_flag_inexact, fp_status); \
69
- switch (fp_status->float_rounding_mode) { \
70
- case float_round_to_zero: \
71
- /* Chop and we're done */ \
72
- break; \
73
- case float_round_up: \
74
- if (a.sign == 0) { \
75
- a.mant = int128_add(a.mant, int128_one()); \
76
- } \
77
- break; \
78
- case float_round_down: \
79
- if (a.sign != 0) { \
80
- a.mant = int128_add(a.mant, int128_one()); \
81
- } \
82
- break; \
83
- default: \
84
- if (a.round || a.sticky) { \
85
- /* round up if guard is 1, down if guard is zero */ \
86
- a.mant = int128_add(a.mant, int128_make64(a.guard)); \
87
- } else if (a.guard) { \
88
- /* exactly .5, round up if odd */ \
89
- a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
90
- } \
91
- break; \
92
- } \
93
- } \
94
- /* \
95
- * OK, now we might have carried all the way up. \
96
- * So we might need to shr once \
97
- * at least we know that the lsb should be zero if we rounded and \
98
- * got a carry out... \
99
- */ \
100
- if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
101
- a = accum_norm_right(a, 1); \
102
- } \
103
- /* Overflow? */ \
104
- if (a.exp >= INF_EXP) { \
105
- /* Yep, inf result */ \
106
- float_raise(float_flag_overflow, fp_status); \
107
- float_raise(float_flag_inexact, fp_status); \
108
- switch (fp_status->float_rounding_mode) { \
109
- case float_round_to_zero: \
110
- return maxfinite_##SUFFIX(a.sign); \
111
- case float_round_up: \
112
- if (a.sign == 0) { \
113
- return infinite_##SUFFIX(a.sign); \
114
- } else { \
115
- return maxfinite_##SUFFIX(a.sign); \
116
- } \
117
- case float_round_down: \
118
- if (a.sign != 0) { \
119
- return infinite_##SUFFIX(a.sign); \
120
- } else { \
121
- return maxfinite_##SUFFIX(a.sign); \
122
- } \
123
- default: \
124
- return infinite_##SUFFIX(a.sign); \
125
- } \
126
- } \
127
- /* Underflow? */ \
128
- if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
129
- /* Leading one means: No, we're normal. So, we should be done... */ \
130
- INTERNAL_TYPE ret; \
131
- ret.i = 0; \
132
- ret.sign = a.sign; \
133
- ret.exp = a.exp; \
134
- ret.mant = int128_getlo(a.mant); \
135
- return ret.i; \
136
- } \
137
- assert(a.exp == 1); \
138
- INTERNAL_TYPE ret; \
139
- ret.i = 0; \
140
- ret.sign = a.sign; \
141
- ret.exp = 0; \
142
- ret.mant = int128_getlo(a.mant); \
143
- return ret.i; \
144
+static float64 accum_round_float64(Accum a, float_status *fp_status)
145
+{
146
+ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
147
+ && ((a.guard | a.round | a.sticky) == 0)) {
148
+ /* result zero */
149
+ switch (fp_status->float_rounding_mode) {
150
+ case float_round_down:
151
+ return zero_float64(1);
152
+ default:
153
+ return zero_float64(0);
154
+ }
155
+ }
156
+ /*
157
+ * Normalize right
158
+ * We want DF_MANTBITS bits of mantissa plus the leading one.
159
+ * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
160
+ * So we need to normalize right while the high word is non-zero and
161
+ * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
162
+ */
163
+ while ((int128_gethi(a.mant) != 0) ||
164
+ ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
165
+ a = accum_norm_right(a, 1);
166
+ }
167
+ /*
168
+ * OK, now normalize left
169
+ * We want to normalize left until we have a leading one in bit 24
170
+ * Theoretically, we only need to shift a maximum of one to the left if we
171
+ * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
172
+ * should be 0
173
+ */
174
+ while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
175
+ a = accum_norm_left(a);
176
+ }
177
+ /*
178
+ * OK, now we might need to denormalize because of potential underflow.
179
+ * We need to do this before rounding, and rounding might make us normal
180
+ * again
181
+ */
182
+ while (a.exp <= 0) {
183
+ a = accum_norm_right(a, 1 - a.exp);
184
+ /*
185
+ * Do we have underflow?
186
+ * That's when we get an inexact answer because we ran out of bits
187
+ * in a denormal.
188
+ */
189
+ if (a.guard || a.round || a.sticky) {
190
+ float_raise(float_flag_underflow, fp_status);
191
+ }
192
+ }
193
+ /* OK, we're relatively canonical... now we need to round */
194
+ if (a.guard || a.round || a.sticky) {
195
+ float_raise(float_flag_inexact, fp_status);
196
+ switch (fp_status->float_rounding_mode) {
197
+ case float_round_to_zero:
198
+ /* Chop and we're done */
199
+ break;
200
+ case float_round_up:
201
+ if (a.sign == 0) {
202
+ a.mant = int128_add(a.mant, int128_one());
203
+ }
204
+ break;
205
+ case float_round_down:
206
+ if (a.sign != 0) {
207
+ a.mant = int128_add(a.mant, int128_one());
208
+ }
209
+ break;
210
+ default:
211
+ if (a.round || a.sticky) {
212
+ /* round up if guard is 1, down if guard is zero */
213
+ a.mant = int128_add(a.mant, int128_make64(a.guard));
214
+ } else if (a.guard) {
215
+ /* exactly .5, round up if odd */
216
+ a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
217
+ }
218
+ break;
219
+ }
220
+ }
221
+ /*
222
+ * OK, now we might have carried all the way up.
223
+ * So we might need to shr once
224
+ * at least we know that the lsb should be zero if we rounded and
225
+ * got a carry out...
226
+ */
227
+ if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
228
+ a = accum_norm_right(a, 1);
229
+ }
230
+ /* Overflow? */
231
+ if (a.exp >= DF_INF_EXP) {
232
+ /* Yep, inf result */
233
+ float_raise(float_flag_overflow, fp_status);
234
+ float_raise(float_flag_inexact, fp_status);
235
+ switch (fp_status->float_rounding_mode) {
236
+ case float_round_to_zero:
237
+ return maxfinite_float64(a.sign);
238
+ case float_round_up:
239
+ if (a.sign == 0) {
240
+ return infinite_float64(a.sign);
241
+ } else {
242
+ return maxfinite_float64(a.sign);
243
+ }
244
+ case float_round_down:
245
+ if (a.sign != 0) {
246
+ return infinite_float64(a.sign);
247
+ } else {
248
+ return maxfinite_float64(a.sign);
249
+ }
250
+ default:
251
+ return infinite_float64(a.sign);
252
+ }
253
+ }
254
+ /* Underflow? */
255
+ if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
256
+ /* Leading one means: No, we're normal. So, we should be done... */
257
+ Double ret;
258
+ ret.i = 0;
259
+ ret.sign = a.sign;
260
+ ret.exp = a.exp;
261
+ ret.mant = int128_getlo(a.mant);
262
+ return ret.i;
263
+ }
264
+ assert(a.exp == 1);
265
+ Double ret;
266
+ ret.i = 0;
267
+ ret.sign = a.sign;
268
+ ret.exp = 0;
269
+ ret.mant = int128_getlo(a.mant);
270
+ return ret.i;
192
}
271
}
193
272
194
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
273
-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
195
index XXXXXXX..XXXXXXX 100644
274
-
196
--- a/tcg/arm/tcg-target.c.inc
275
float64 internal_mpyhh(float64 a, float64 b,
197
+++ b/tcg/arm/tcg-target.c.inc
276
unsigned long long int accumulated,
198
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
277
float_status *fp_status)
199
intptr_t ptr, dif, dil;
200
TCGReg base = TCG_REG_PC;
201
202
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
203
ptr = get_jmp_target_addr(s, which);
204
dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
205
dil = sextract32(dif, 0, 12);
206
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
207
index XXXXXXX..XXXXXXX 100644
208
--- a/tcg/mips/tcg-target.c.inc
209
+++ b/tcg/mips/tcg-target.c.inc
210
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
211
static void tcg_out_goto_tb(TCGContext *s, int which)
212
{
213
/* indirect jump method */
214
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
215
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
216
get_jmp_target_addr(s, which));
217
tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
218
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
219
index XXXXXXX..XXXXXXX 100644
220
--- a/tcg/riscv/tcg-target.c.inc
221
+++ b/tcg/riscv/tcg-target.c.inc
222
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
223
224
static void tcg_out_goto_tb(TCGContext *s, int which)
225
{
226
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
227
/* indirect jump method */
228
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
229
get_jmp_target_addr(s, which));
230
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
231
index XXXXXXX..XXXXXXX 100644
232
--- a/tcg/s390x/tcg-target.c.inc
233
+++ b/tcg/s390x/tcg-target.c.inc
234
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
235
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
236
uintptr_t jmp_rx, uintptr_t jmp_rw)
237
{
238
+ if (!HAVE_FACILITY(GEN_INST_EXT)) {
239
+ return;
240
+ }
241
/* patch the branch destination */
242
uintptr_t addr = tb->jmp_target_addr[n];
243
intptr_t disp = addr - (jmp_rx - 2);
244
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
245
index XXXXXXX..XXXXXXX 100644
246
--- a/tcg/tci/tcg-target.c.inc
247
+++ b/tcg/tci/tcg-target.c.inc
248
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
249
250
static void tcg_out_goto_tb(TCGContext *s, int which)
251
{
252
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
253
/* indirect jump method. */
254
tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
255
set_jmp_reset_offset(s, which);
256
--
278
--
257
2.34.1
279
2.43.0
258
259
New patch
1
This structure, with bitfields, is incorrect for big-endian.
2
Use the existing float32_getexp_raw which uses extract32.
1
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/hexagon/fma_emu.c | 16 +++-------------
8
1 file changed, 3 insertions(+), 13 deletions(-)
9
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/hexagon/fma_emu.c
13
+++ b/target/hexagon/fma_emu.c
14
@@ -XXX,XX +XXX,XX @@ typedef union {
15
};
16
} Double;
17
18
-typedef union {
19
- float f;
20
- uint32_t i;
21
- struct {
22
- uint32_t mant:23;
23
- uint32_t exp:8;
24
- uint32_t sign:1;
25
- };
26
-} Float;
27
-
28
static uint64_t float64_getmant(float64 f64)
29
{
30
Double a = { .i = f64 };
31
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
32
33
int32_t float32_getexp(float32 f32)
34
{
35
- Float a = { .i = f32 };
36
+ int exp = float32_getexp_raw(f32);
37
if (float32_is_normal(f32)) {
38
- return a.exp;
39
+ return exp;
40
}
41
if (float32_is_denormal(f32)) {
42
- return a.exp + 1;
43
+ return exp + 1;
44
}
45
return -1;
46
}
47
--
48
2.43.0
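
A sketch of the position-based alternative to the bitfield struct
(helper names invented): extract32 reads a field by bit offset, so the
result is the same on big- and little-endian hosts.

    #include "qemu/osdep.h"
    #include "qemu/bitops.h"
    #include "fpu/softfloat.h"

    static inline uint32_t sf_exp_raw(float32 f32)
    {
        return extract32(float32_val(f32), 23, 8);   /* binary32 exponent */
    }

    static inline uint32_t sf_sign_raw(float32 f32)
    {
        return extract32(float32_val(f32), 31, 1);   /* sign bit */
    }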
1
The old implementation replaces two insns, swapping between
1
This structure, with bitfields, is incorrect for big-endian.
2
Use extract64 and deposit64 instead.
2
3
3
    b    <dest>
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
    nop
5
    br    x30
6
and
7
    adrp    x30, <dest>
8
    addi    x30, x30, lo12:<dest>
9
    br    x30
10
11
There is a race condition in which a thread could be stopped at
12
the PC of the second insn, and when restarted does not see the
13
complete address computation and branches to nowhere.
14
15
The new implementation replaces only one insn, swapping between
16
17
    b    <dest>
18
    br    tmp
19
and
20
    ldr    tmp, <jmp_addr>
21
    br    tmp
22
23
Reported-by: hev <r@hev.cc>
24
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
25
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
26
---
6
---
27
tcg/aarch64/tcg-target.h | 2 +-
7
target/hexagon/fma_emu.c | 46 ++++++++++++++--------------------------
28
tcg/aarch64/tcg-target.c.inc | 66 +++++++++++++++---------------------
8
1 file changed, 16 insertions(+), 30 deletions(-)
29
2 files changed, 29 insertions(+), 39 deletions(-)
30
9
31
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
32
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
33
--- a/tcg/aarch64/tcg-target.h
12
--- a/target/hexagon/fma_emu.c
34
+++ b/tcg/aarch64/tcg-target.h
13
+++ b/target/hexagon/fma_emu.c
35
@@ -XXX,XX +XXX,XX @@
14
@@ -XXX,XX +XXX,XX @@
36
15
37
#define TCG_TARGET_INSN_UNIT_SIZE 4
16
#define WAY_BIG_EXP 4096
38
#define TCG_TARGET_TLB_DISPLACEMENT_BITS 24
17
39
-#define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
18
-typedef union {
40
+#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
19
- double f;
41
20
- uint64_t i;
42
typedef enum {
21
- struct {
43
TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3,
22
- uint64_t mant:52;
44
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
23
- uint64_t exp:11;
45
index XXXXXXX..XXXXXXX 100644
24
- uint64_t sign:1;
46
--- a/tcg/aarch64/tcg-target.c.inc
25
- };
47
+++ b/tcg/aarch64/tcg-target.c.inc
26
-} Double;
48
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
27
-
49
tcg_out_call_int(s, target);
28
static uint64_t float64_getmant(float64 f64)
29
{
30
- Double a = { .i = f64 };
31
+ uint64_t mant = extract64(f64, 0, 52);
32
if (float64_is_normal(f64)) {
33
- return a.mant | 1ULL << 52;
34
+ return mant | 1ULL << 52;
35
}
36
if (float64_is_zero(f64)) {
37
return 0;
38
}
39
if (float64_is_denormal(f64)) {
40
- return a.mant;
41
+ return mant;
42
}
43
return ~0ULL;
50
}
44
}
51
45
52
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
46
int32_t float64_getexp(float64 f64)
53
- uintptr_t jmp_rx, uintptr_t jmp_rw)
54
-{
55
- uintptr_t addr = tb->jmp_target_addr[n];
56
- tcg_insn_unit i1, i2;
57
- TCGType rt = TCG_TYPE_I64;
58
- TCGReg rd = TCG_REG_TMP;
59
- uint64_t pair;
60
-
61
- ptrdiff_t offset = addr - jmp_rx;
62
-
63
- if (offset == sextract64(offset, 0, 26)) {
64
- i1 = I3206_B | ((offset >> 2) & 0x3ffffff);
65
- i2 = NOP;
66
- } else {
67
- offset = (addr >> 12) - (jmp_rx >> 12);
68
-
69
- /* patch ADRP */
70
- i1 = I3406_ADRP | (offset & 3) << 29 | (offset & 0x1ffffc) << (5 - 2) | rd;
71
- /* patch ADDI */
72
- i2 = I3401_ADDI | rt << 31 | (addr & 0xfff) << 10 | rd << 5 | rd;
73
- }
74
- pair = (uint64_t)i2 << 32 | i1;
75
- qatomic_set((uint64_t *)jmp_rw, pair);
76
- flush_idcache_range(jmp_rx, jmp_rw, 8);
77
-}
78
-
79
static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
80
{
47
{
81
if (!l->has_value) {
48
- Double a = { .i = f64 };
82
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
49
+ int exp = extract64(f64, 52, 11);
83
static void tcg_out_goto_tb(TCGContext *s, int which)
50
if (float64_is_normal(f64)) {
51
- return a.exp;
52
+ return exp;
53
}
54
if (float64_is_denormal(f64)) {
55
- return a.exp + 1;
56
+ return exp + 1;
57
}
58
return -1;
59
}
60
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
61
/* Return a maximum finite value with the requested sign */
62
static float64 accum_round_float64(Accum a, float_status *fp_status)
84
{
63
{
85
/*
64
+ uint64_t ret;
86
- * Ensure that ADRP+ADD are 8-byte aligned so that an atomic
87
- * write can be used to patch the target address.
88
+ * Direct branch, or indirect address load, will be patched
89
+ * by tb_target_set_jmp_target. Assert indirect load offset
90
+ * in range early, regardless of direct branch distance.
91
*/
92
- if ((uintptr_t)s->code_ptr & 7) {
93
- tcg_out32(s, NOP);
94
- }
95
+ intptr_t i_off = tcg_pcrel_diff(s, (void *)get_jmp_target_addr(s, which));
96
+ tcg_debug_assert(i_off == sextract64(i_off, 0, 21));
97
+
65
+
98
set_jmp_insn_offset(s, which);
66
if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
99
- /*
67
&& ((a.guard | a.round | a.sticky) == 0)) {
100
- * actual branch destination will be patched by
68
/* result zero */
101
- * tb_target_set_jmp_target later
69
@@ -XXX,XX +XXX,XX @@ static float64 accum_round_float64(Accum a, float_status *fp_status)
102
- */
70
}
103
- tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0);
71
}
104
- tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0);
72
/* Underflow? */
105
+ tcg_out32(s, I3206_B);
73
- if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
106
tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
74
+ ret = int128_getlo(a.mant);
107
set_jmp_reset_offset(s, which);
75
+ if (ret & (1ULL << DF_MANTBITS)) {
76
/* Leading one means: No, we're normal. So, we should be done... */
77
- Double ret;
78
- ret.i = 0;
79
- ret.sign = a.sign;
80
- ret.exp = a.exp;
81
- ret.mant = int128_getlo(a.mant);
82
- return ret.i;
83
+ ret = deposit64(ret, 52, 11, a.exp);
84
+ } else {
85
+ assert(a.exp == 1);
86
+ ret = deposit64(ret, 52, 11, 0);
87
}
88
- assert(a.exp == 1);
89
- Double ret;
90
- ret.i = 0;
91
- ret.sign = a.sign;
92
- ret.exp = 0;
93
- ret.mant = int128_getlo(a.mant);
94
- return ret.i;
95
+ ret = deposit64(ret, 63, 1, a.sign);
96
+ return ret;
108
}
97
}
109
98
110
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
99
float64 internal_mpyhh(float64 a, float64 b,
111
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
112
+{
113
+ uintptr_t d_addr = tb->jmp_target_addr[n];
114
+ ptrdiff_t d_offset = d_addr - jmp_rx;
115
+ tcg_insn_unit insn;
116
+
117
+ /* Either directly branch, or indirect branch load. */
118
+ if (d_offset == sextract64(d_offset, 0, 28)) {
119
+ insn = deposit32(I3206_B, 0, 26, d_offset >> 2);
120
+ } else {
121
+ uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
122
+ ptrdiff_t i_offset = i_addr - jmp_rx;
123
+
124
+ /* Note that we asserted this in range in tcg_out_goto_tb. */
125
+ insn = deposit32(I3305_LDR | TCG_REG_TMP, 0, 5, i_offset >> 2);
126
+ }
127
+ qatomic_set((uint32_t *)jmp_rw, insn);
128
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
129
+}
130
+
131
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
132
const TCGArg args[TCG_MAX_OP_ARGS],
133
const int const_args[TCG_MAX_OP_ARGS])
134
--
100
--
135
2.34.1
101
2.43.0
136
137
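
As with the float32 case above, a sketch (helper name invented) of
packing a binary64 with deposit64 instead of a bitfield struct; the bit
positions are explicit, so host endianness does not matter.

    #include "qemu/osdep.h"
    #include "qemu/bitops.h"

    static inline uint64_t df_pack(uint64_t sign, uint64_t exp, uint64_t mant)
    {
        uint64_t r = 0;

        r = deposit64(r, 0, 52, mant);   /* fraction */
        r = deposit64(r, 52, 11, exp);   /* biased exponent */
        r = deposit64(r, 63, 1, sign);   /* sign */
        return r;
    }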
1
The old ppc64 implementation replaces 2 or 4 insns, which leaves a race
1
No need to open-code 64x64->128-bit multiplication.
2
condition in which a thread could be stopped at a PC in the middle of
3
the sequence, and when restarted does not see the complete address
4
computation and branches to nowhere.
5
2
6
The new implementation replaces only one insn, swapping between
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
7
8
    b <dest>
9
and
10
    mtctr    r31
11
12
falling through to a general-case indirect branch.
13
14
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
15
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
16
---
5
---
17
tcg/ppc/tcg-target.h | 3 +-
6
target/hexagon/fma_emu.c | 32 +++-----------------------------
18
tcg/ppc/tcg-target.c.inc | 158 +++++++++++----------------------------
7
1 file changed, 3 insertions(+), 29 deletions(-)
19
2 files changed, 44 insertions(+), 117 deletions(-)
20
8
21
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
9
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
22
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
23
--- a/tcg/ppc/tcg-target.h
11
--- a/target/hexagon/fma_emu.c
24
+++ b/tcg/ppc/tcg-target.h
12
+++ b/target/hexagon/fma_emu.c
25
@@ -XXX,XX +XXX,XX @@
13
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32)
26
14
return -1;
27
#ifdef _ARCH_PPC64
28
# define TCG_TARGET_REG_BITS 64
29
-# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
30
#else
31
# define TCG_TARGET_REG_BITS 32
32
-# define MAX_CODE_GEN_BUFFER_SIZE (32 * MiB)
33
#endif
34
+#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
35
36
#define TCG_TARGET_NB_REGS 64
37
#define TCG_TARGET_INSN_UNIT_SIZE 4
38
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
39
index XXXXXXX..XXXXXXX 100644
40
--- a/tcg/ppc/tcg-target.c.inc
41
+++ b/tcg/ppc/tcg-target.c.inc
42
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
43
tcg_out32(s, insn);
44
}
15
}
45
16
46
-static inline uint64_t make_pair(tcg_insn_unit i1, tcg_insn_unit i2)
17
-static uint32_t int128_getw0(Int128 x)
47
-{
18
-{
48
- if (HOST_BIG_ENDIAN) {
19
- return int128_getlo(x);
49
- return (uint64_t)i1 << 32 | i2;
50
- }
51
- return (uint64_t)i2 << 32 | i1;
52
-}
20
-}
53
-
21
-
54
-static inline void ppc64_replace2(uintptr_t rx, uintptr_t rw,
22
-static uint32_t int128_getw1(Int128 x)
55
- tcg_insn_unit i0, tcg_insn_unit i1)
56
-{
23
-{
57
-#if TCG_TARGET_REG_BITS == 64
24
- return int128_getlo(x) >> 32;
58
- qatomic_set((uint64_t *)rw, make_pair(i0, i1));
59
- flush_idcache_range(rx, rw, 8);
60
-#else
61
- qemu_build_not_reached();
62
-#endif
63
-}
25
-}
64
-
26
-
65
-static inline void ppc64_replace4(uintptr_t rx, uintptr_t rw,
27
static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
66
- tcg_insn_unit i0, tcg_insn_unit i1,
28
{
67
- tcg_insn_unit i2, tcg_insn_unit i3)
29
- Int128 a, b;
68
-{
30
- uint64_t pp0, pp1a, pp1b, pp1s, pp2;
69
- uint64_t p[2];
31
+ uint64_t l, h;
32
33
- a = int128_make64(ai);
34
- b = int128_make64(bi);
35
- pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
36
- pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
37
- pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
38
- pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
70
-
39
-
71
- p[!HOST_BIG_ENDIAN] = make_pair(i0, i1);
40
- pp1s = pp1a + pp1b;
72
- p[HOST_BIG_ENDIAN] = make_pair(i2, i3);
41
- if ((pp1s < pp1a) || (pp1s < pp1b)) {
73
-
42
- pp2 += (1ULL << 32);
74
- /*
43
- }
75
- * There's no convenient way to get the compiler to allocate a pair
44
- uint64_t ret_low = pp0 + (pp1s << 32);
76
- * of registers at an even index, so copy into r6/r7 and clobber.
45
- if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
77
- */
46
- pp2 += 1;
78
- asm("mr %%r6, %1\n\t"
79
- "mr %%r7, %2\n\t"
80
- "stq %%r6, %0"
81
- : "=Q"(*(__int128 *)rw) : "r"(p[0]), "r"(p[1]) : "r6", "r7");
82
- flush_idcache_range(rx, rw, 16);
83
-}
84
-
85
-void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
86
- uintptr_t jmp_rx, uintptr_t jmp_rw)
87
-{
88
- tcg_insn_unit i0, i1, i2, i3;
89
- uintptr_t addr = tb->jmp_target_addr[n];
90
- intptr_t tb_diff = addr - (uintptr_t)tb->tc.ptr;
91
- intptr_t br_diff = addr - (jmp_rx + 4);
92
- intptr_t lo, hi;
93
-
94
- if (TCG_TARGET_REG_BITS == 32) {
95
- intptr_t diff = addr - jmp_rx;
96
- tcg_debug_assert(in_range_b(diff));
97
- qatomic_set((uint32_t *)jmp_rw, B | (diff & 0x3fffffc));
98
- flush_idcache_range(jmp_rx, jmp_rw, 4);
99
- return;
100
- }
47
- }
101
-
48
-
102
- /*
49
- return int128_make128(ret_low, pp2 + (pp1s >> 32));
103
- * For 16-bit displacements, we can use a single add + branch.
50
+ mulu64(&l, &h, ai, bi);
104
- * This happens quite often.
51
+ return int128_make128(l, h);
105
- */
106
- if (tb_diff == (int16_t)tb_diff) {
107
- i0 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
108
- i1 = B | (br_diff & 0x3fffffc);
109
- ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
110
- return;
111
- }
112
-
113
- lo = (int16_t)tb_diff;
114
- hi = (int32_t)(tb_diff - lo);
115
- assert(tb_diff == hi + lo);
116
- i0 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
117
- i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
118
-
119
- /*
120
- * Without stq from 2.07, we can only update two insns,
121
- * and those must be the ones that load the target address.
122
- */
123
- if (!have_isa_2_07) {
124
- ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
125
- return;
126
- }
127
-
128
- /*
129
- * For 26-bit displacements, we can use a direct branch.
130
- * Otherwise we still need the indirect branch, which we
131
- * must restore after a potential direct branch write.
132
- */
133
- br_diff -= 4;
134
- if (in_range_b(br_diff)) {
135
- i2 = B | (br_diff & 0x3fffffc);
136
- i3 = NOP;
137
- } else {
138
- i2 = MTSPR | RS(TCG_REG_TB) | CTR;
139
- i3 = BCCTR | BO_ALWAYS;
140
- }
141
- ppc64_replace4(jmp_rx, jmp_rw, i0, i1, i2, i3);
142
-}
143
-
144
static void tcg_out_call_int(TCGContext *s, int lk,
145
const tcg_insn_unit *target)
146
{
147
@@ -XXX,XX +XXX,XX @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
148
149
static void tcg_out_goto_tb(TCGContext *s, int which)
150
{
151
- /* Direct jump. */
152
- if (TCG_TARGET_REG_BITS == 64) {
153
- /* Ensure the next insns are 8 or 16-byte aligned. */
154
- while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
155
- tcg_out32(s, NOP);
156
- }
157
+ uintptr_t ptr = get_jmp_target_addr(s, which);
158
+
159
+ if (USE_REG_TB) {
160
+ ptrdiff_t offset = tcg_tbrel_diff(s, (void *)ptr);
161
+ tcg_out_mem_long(s, LD, LDX, TCG_REG_TB, TCG_REG_TB, offset);
162
+
163
+ /* Direct branch will be patched by tb_target_set_jmp_target. */
164
set_jmp_insn_offset(s, which);
165
- tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
166
- tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
167
tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
168
+
169
+ /* When branch is out of range, fall through to indirect. */
170
+ tcg_out32(s, BCCTR | BO_ALWAYS);
171
+
172
+ /* For the unlinked case, need to reset TCG_REG_TB. */
173
+ set_jmp_reset_offset(s, which);
174
+ tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
175
+ -tcg_current_code_size(s));
176
+ } else {
177
+ /* Direct branch will be patched by tb_target_set_jmp_target. */
178
+ set_jmp_insn_offset(s, which);
179
+ tcg_out32(s, NOP);
180
+
181
+ /* When branch is out of range, fall through to indirect. */
182
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - (int16_t)ptr);
183
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, (int16_t)ptr);
184
+ tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
185
tcg_out32(s, BCCTR | BO_ALWAYS);
186
set_jmp_reset_offset(s, which);
187
- if (USE_REG_TB) {
188
- /* For the unlinked case, need to reset TCG_REG_TB. */
189
- tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
190
- -tcg_current_code_size(s));
191
- }
192
- } else {
193
- set_jmp_insn_offset(s, which);
194
- tcg_out32(s, B);
195
- set_jmp_reset_offset(s, which);
196
}
197
}
52
}
198
53
199
+void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
54
static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
200
+ uintptr_t jmp_rx, uintptr_t jmp_rw)
201
+{
202
+ uintptr_t addr = tb->jmp_target_addr[n];
203
+ intptr_t diff = addr - jmp_rx;
204
+ tcg_insn_unit insn;
205
+
206
+ if (in_range_b(diff)) {
207
+ insn = B | (diff & 0x3fffffc);
208
+ } else if (USE_REG_TB) {
209
+ insn = MTSPR | RS(TCG_REG_TB) | CTR;
210
+ } else {
211
+ insn = NOP;
212
+ }
213
+
214
+ qatomic_set((uint32_t *)jmp_rw, insn);
215
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
216
+}
217
+
218
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
219
const TCGArg args[TCG_MAX_OP_ARGS],
220
const int const_args[TCG_MAX_OP_ARGS])
221
--
55
--
222
2.34.1
56
2.43.0
223
224
1
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
1
Initialize x.mant directly from the accumulated value,
2
rather than by multiplying it by 1.
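
That is, widening by multiplying by 1 and constructing the Int128
directly are equivalent; an illustrative check (assumes access to
fma_emu.c's int128_mul_6464 helper and glib's g_assert):

    static void check_widen_equivalence(uint64_t accumulated)
    {
        Int128 via_mul  = int128_mul_6464(accumulated, 1);  /* old form */
        Int128 via_make = int128_make64(accumulated);       /* new form */

        g_assert(int128_eq(via_mul, via_make));
    }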
3
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
6
---
4
tcg/ppc/tcg-target.c.inc | 32 +++++++++++++-------------------
7
target/hexagon/fma_emu.c | 2 +-
5
1 file changed, 13 insertions(+), 19 deletions(-)
8
1 file changed, 1 insertion(+), 1 deletion(-)
6
9
7
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
10
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
8
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/ppc/tcg-target.c.inc
12
--- a/target/hexagon/fma_emu.c
10
+++ b/tcg/ppc/tcg-target.c.inc
13
+++ b/target/hexagon/fma_emu.c
11
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
14
@@ -XXX,XX +XXX,XX @@ float64 internal_mpyhh(float64 a, float64 b,
12
15
float64_is_infinity(b)) {
13
switch (opc) {
16
return float64_mul(a, b, fp_status);
14
case INDEX_op_goto_tb:
17
}
15
- if (s->tb_jmp_insn_offset) {
18
- x.mant = int128_mul_6464(accumulated, 1);
16
- /* Direct jump. */
19
+ x.mant = int128_make64(accumulated);
17
- if (TCG_TARGET_REG_BITS == 64) {
20
x.sticky = sticky;
18
- /* Ensure the next insns are 8 or 16-byte aligned. */
21
prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
19
- while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
22
x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
20
- tcg_out32(s, NOP);
21
- }
22
- s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
23
- tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
24
- tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
25
- } else {
26
- s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
27
- tcg_out32(s, B);
28
- s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
29
- break;
30
+ qemu_build_assert(TCG_TARGET_HAS_direct_jump);
31
+ /* Direct jump. */
32
+ if (TCG_TARGET_REG_BITS == 64) {
33
+ /* Ensure the next insns are 8 or 16-byte aligned. */
34
+ while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
35
+ tcg_out32(s, NOP);
36
}
37
+ s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
38
+ tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
39
+ tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
40
} else {
41
- /* Indirect jump. */
42
- tcg_debug_assert(s->tb_jmp_insn_offset == NULL);
43
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, 0,
44
- (intptr_t)(s->tb_jmp_insn_offset + args[0]));
45
+ s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
46
+ tcg_out32(s, B);
47
+ s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
48
+ break;
49
}
50
tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
51
tcg_out32(s, BCCTR | BO_ALWAYS);
52
--
23
--
53
2.34.1
24
2.43.0
54
55
1
This will shortly be used for more than reset.
1
Convert all targets simultaneously, as the gen_intermediate_code
2
function disappears from the target. While there are possible
3
workarounds, they're larger than simply performing the conversion.
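
Each target follows the same shape; a hedged sketch for a hypothetical
target "foo" (the foo_* names are illustrative, the hook signature is
the new TCGCPUOps.translate_code below):

    /* foo/translate.c: the former gen_intermediate_code, renamed. */
    void foo_translate_code(CPUState *cs, TranslationBlock *tb,
                            int *max_insns, vaddr pc, void *host_pc)
    {
        DisasContext dc = { };

        translator_loop(cs, tb, max_insns, pc, host_pc,
                        &foo_tr_ops, &dc.base);
    }

    /* foo/cpu.c: wire the hook next to .initialize. */
    static const TCGCPUOps foo_tcg_ops = {
        .initialize     = foo_translate_init,
        .translate_code = foo_translate_code,
        /* other hooks unchanged */
    };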
2
4
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
---
7
include/exec/exec-all.h | 2 +-
8
include/exec/translator.h | 14 --------------
8
accel/tcg/translate-all.c | 8 ++++----
9
include/hw/core/tcg-cpu-ops.h | 13 +++++++++++++
9
tcg/tcg.c | 4 ++--
10
target/alpha/cpu.h | 2 ++
10
3 files changed, 7 insertions(+), 7 deletions(-)
11
target/arm/internals.h | 2 ++
12
target/avr/cpu.h | 2 ++
13
target/hexagon/cpu.h | 2 ++
14
target/hppa/cpu.h | 2 ++
15
target/i386/tcg/helper-tcg.h | 2 ++
16
target/loongarch/internals.h | 2 ++
17
target/m68k/cpu.h | 2 ++
18
target/microblaze/cpu.h | 2 ++
19
target/mips/tcg/tcg-internal.h | 2 ++
20
target/openrisc/cpu.h | 2 ++
21
target/ppc/cpu.h | 2 ++
22
target/riscv/cpu.h | 3 +++
23
target/rx/cpu.h | 2 ++
24
target/s390x/s390x-internal.h | 2 ++
25
target/sh4/cpu.h | 2 ++
26
target/sparc/cpu.h | 2 ++
27
target/tricore/cpu.h | 2 ++
28
target/xtensa/cpu.h | 2 ++
29
accel/tcg/cpu-exec.c | 8 +++++---
30
accel/tcg/translate-all.c | 8 +++++---
31
target/alpha/cpu.c | 1 +
32
target/alpha/translate.c | 4 ++--
33
target/arm/cpu.c | 1 +
34
target/arm/tcg/cpu-v7m.c | 1 +
35
target/arm/tcg/translate.c | 5 ++---
36
target/avr/cpu.c | 1 +
37
target/avr/translate.c | 6 +++---
38
target/hexagon/cpu.c | 1 +
39
target/hexagon/translate.c | 4 ++--
40
target/hppa/cpu.c | 1 +
41
target/hppa/translate.c | 4 ++--
42
target/i386/tcg/tcg-cpu.c | 1 +
43
target/i386/tcg/translate.c | 5 ++---
44
target/loongarch/cpu.c | 1 +
45
target/loongarch/tcg/translate.c | 4 ++--
46
target/m68k/cpu.c | 1 +
47
target/m68k/translate.c | 4 ++--
48
target/microblaze/cpu.c | 1 +
49
target/microblaze/translate.c | 4 ++--
50
target/mips/cpu.c | 1 +
51
target/mips/tcg/translate.c | 4 ++--
52
target/openrisc/cpu.c | 1 +
53
target/openrisc/translate.c | 4 ++--
54
target/ppc/cpu_init.c | 1 +
55
target/ppc/translate.c | 4 ++--
56
target/riscv/tcg/tcg-cpu.c | 1 +
57
target/riscv/translate.c | 4 ++--
58
target/rx/cpu.c | 1 +
59
target/rx/translate.c | 4 ++--
60
target/s390x/cpu.c | 1 +
61
target/s390x/tcg/translate.c | 4 ++--
62
target/sh4/cpu.c | 1 +
63
target/sh4/translate.c | 4 ++--
64
target/sparc/cpu.c | 1 +
65
target/sparc/translate.c | 4 ++--
66
target/tricore/cpu.c | 1 +
67
target/tricore/translate.c | 5 ++---
68
target/xtensa/cpu.c | 1 +
69
target/xtensa/translate.c | 4 ++--
70
62 files changed, 121 insertions(+), 62 deletions(-)
11
71
12
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
72
diff --git a/include/exec/translator.h b/include/exec/translator.h
13
index XXXXXXX..XXXXXXX 100644
73
index XXXXXXX..XXXXXXX 100644
14
--- a/include/exec/exec-all.h
74
--- a/include/exec/translator.h
15
+++ b/include/exec/exec-all.h
75
+++ b/include/exec/translator.h
16
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
76
@@ -XXX,XX +XXX,XX @@
17
* setting one of the jump targets (or patching the jump instruction). Only
77
#include "qemu/bswap.h"
18
* two of such jumps are supported.
78
#include "exec/vaddr.h"
79
80
-/**
81
- * gen_intermediate_code
82
- * @cpu: cpu context
83
- * @tb: translation block
84
- * @max_insns: max number of instructions to translate
85
- * @pc: guest virtual program counter address
86
- * @host_pc: host physical program counter address
87
- *
88
- * This function must be provided by the target, which should create
89
- * the target-specific DisasContext, and then invoke translator_loop.
90
- */
91
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
92
- vaddr pc, void *host_pc);
93
-
94
/**
95
* DisasJumpType:
96
* @DISAS_NEXT: Next instruction in program order.
97
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
98
index XXXXXXX..XXXXXXX 100644
99
--- a/include/hw/core/tcg-cpu-ops.h
100
+++ b/include/hw/core/tcg-cpu-ops.h
101
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
102
* Called when the first CPU is realized.
19
*/
103
*/
20
+#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
104
void (*initialize)(void);
21
uint16_t jmp_reset_offset[2]; /* offset of original jump target */
105
+ /**
22
-#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
106
+ * @translate_code: Translate guest instructions to TCGOps
23
uintptr_t jmp_target_arg[2]; /* target address or offset */
107
+ * @cpu: cpu context
24
108
+ * @tb: translation block
25
/*
109
+ * @max_insns: max number of instructions to translate
110
+ * @pc: guest virtual program counter address
111
+ * @host_pc: host physical program counter address
112
+ *
113
+ * This function must be provided by the target, which should create
114
+ * the target-specific DisasContext, and then invoke translator_loop.
115
+ */
116
+ void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
117
+ int *max_insns, vaddr pc, void *host_pc);
118
/**
119
* @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
120
*
121
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
122
index XXXXXXX..XXXXXXX 100644
123
--- a/target/alpha/cpu.h
124
+++ b/target/alpha/cpu.h
125
@@ -XXX,XX +XXX,XX @@ enum {
126
};
127
128
void alpha_translate_init(void);
129
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
130
+ int *max_insns, vaddr pc, void *host_pc);
131
132
#define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
133
134
diff --git a/target/arm/internals.h b/target/arm/internals.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/target/arm/internals.h
137
+++ b/target/arm/internals.h
138
@@ -XXX,XX +XXX,XX @@ void init_cpreg_list(ARMCPU *cpu);
139
140
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
141
void arm_translate_init(void);
142
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
143
+ int *max_insns, vaddr pc, void *host_pc);
144
145
void arm_cpu_register_gdb_commands(ARMCPU *cpu);
146
void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
147
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
148
index XXXXXXX..XXXXXXX 100644
149
--- a/target/avr/cpu.h
150
+++ b/target/avr/cpu.h
151
@@ -XXX,XX +XXX,XX @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
152
}
153
154
void avr_cpu_tcg_init(void);
155
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
156
+ int *max_insns, vaddr pc, void *host_pc);
157
158
int cpu_avr_exec(CPUState *cpu);
159
160
diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
161
index XXXXXXX..XXXXXXX 100644
162
--- a/target/hexagon/cpu.h
163
+++ b/target/hexagon/cpu.h
164
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
165
typedef HexagonCPU ArchCPU;
166
167
void hexagon_translate_init(void);
168
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
169
+ int *max_insns, vaddr pc, void *host_pc);
170
171
#include "exec/cpu-all.h"
172
173
diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/hppa/cpu.h
176
+++ b/target/hppa/cpu.h
177
@@ -XXX,XX +XXX,XX @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
178
}
179
180
void hppa_translate_init(void);
181
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
182
+ int *max_insns, vaddr pc, void *host_pc);
183
184
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
185
186
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
187
index XXXXXXX..XXXXXXX 100644
188
--- a/target/i386/tcg/helper-tcg.h
189
+++ b/target/i386/tcg/helper-tcg.h
190
@@ -XXX,XX +XXX,XX @@ static inline target_long lshift(target_long x, int n)
191
192
/* translate.c */
193
void tcg_x86_init(void);
194
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
195
+ int *max_insns, vaddr pc, void *host_pc);
196
197
/* excp_helper.c */
198
G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
199
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
200
index XXXXXXX..XXXXXXX 100644
201
--- a/target/loongarch/internals.h
202
+++ b/target/loongarch/internals.h
203
@@ -XXX,XX +XXX,XX @@
204
#define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)
205
206
void loongarch_translate_init(void);
207
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
208
+ int *max_insns, vaddr pc, void *host_pc);
209
210
void G_NORETURN do_raise_exception(CPULoongArchState *env,
211
uint32_t exception,
212
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
213
index XXXXXXX..XXXXXXX 100644
214
--- a/target/m68k/cpu.h
215
+++ b/target/m68k/cpu.h
216
@@ -XXX,XX +XXX,XX @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
217
int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
218
219
void m68k_tcg_init(void);
220
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
221
+ int *max_insns, vaddr pc, void *host_pc);
222
void m68k_cpu_init_gdb(M68kCPU *cpu);
223
uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
224
void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
225
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
226
index XXXXXXX..XXXXXXX 100644
227
--- a/target/microblaze/cpu.h
228
+++ b/target/microblaze/cpu.h
229
@@ -XXX,XX +XXX,XX @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
230
}
231
232
void mb_tcg_init(void);
233
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
234
+ int *max_insns, vaddr pc, void *host_pc);
235
236
#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU
237
238
diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
239
index XXXXXXX..XXXXXXX 100644
240
--- a/target/mips/tcg/tcg-internal.h
241
+++ b/target/mips/tcg/tcg-internal.h
242
@@ -XXX,XX +XXX,XX @@
243
#include "cpu.h"
244
245
void mips_tcg_init(void);
246
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
247
+ int *max_insns, vaddr pc, void *host_pc);
248
249
void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
250
G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
251
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
252
index XXXXXXX..XXXXXXX 100644
253
--- a/target/openrisc/cpu.h
254
+++ b/target/openrisc/cpu.h
255
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
256
int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
257
int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
258
void openrisc_translate_init(void);
259
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
260
+ int *max_insns, vaddr pc, void *host_pc);
261
int print_insn_or1k(bfd_vma addr, disassemble_info *info);
262
263
#ifndef CONFIG_USER_ONLY
264
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
265
index XXXXXXX..XXXXXXX 100644
266
--- a/target/ppc/cpu.h
267
+++ b/target/ppc/cpu.h
268
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ppc_cpu;
269
270
/*****************************************************************************/
271
void ppc_translate_init(void);
272
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
273
+ int *max_insns, vaddr pc, void *host_pc);
274
275
#if !defined(CONFIG_USER_ONLY)
276
void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
277
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
278
index XXXXXXX..XXXXXXX 100644
279
--- a/target/riscv/cpu.h
280
+++ b/target/riscv/cpu.h
281
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
282
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
283
284
void riscv_translate_init(void);
285
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
286
+ int *max_insns, vaddr pc, void *host_pc);
287
+
288
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
289
uint32_t exception, uintptr_t pc);
290
291
diff --git a/target/rx/cpu.h b/target/rx/cpu.h
292
index XXXXXXX..XXXXXXX 100644
293
--- a/target/rx/cpu.h
294
+++ b/target/rx/cpu.h
295
@@ -XXX,XX +XXX,XX @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
296
int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
297
298
void rx_translate_init(void);
299
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
300
+ int *max_insns, vaddr pc, void *host_pc);
301
void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);
302
303
#include "exec/cpu-all.h"
304
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
305
index XXXXXXX..XXXXXXX 100644
306
--- a/target/s390x/s390x-internal.h
307
+++ b/target/s390x/s390x-internal.h
308
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,
309
310
/* translate.c */
311
void s390x_translate_init(void);
312
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
313
+ int *max_insns, vaddr pc, void *host_pc);
314
void s390x_restore_state_to_opc(CPUState *cs,
315
const TranslationBlock *tb,
316
const uint64_t *data);
317
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
318
index XXXXXXX..XXXXXXX 100644
319
--- a/target/sh4/cpu.h
320
+++ b/target/sh4/cpu.h
321
@@ -XXX,XX +XXX,XX @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
322
uintptr_t retaddr);
323
324
void sh4_translate_init(void);
325
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
326
+ int *max_insns, vaddr pc, void *host_pc);
327
328
#if !defined(CONFIG_USER_ONLY)
329
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
330
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
331
index XXXXXXX..XXXXXXX 100644
332
--- a/target/sparc/cpu.h
333
+++ b/target/sparc/cpu.h
334
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
335
336
/* translate.c */
337
void sparc_tcg_init(void);
338
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
339
+ int *max_insns, vaddr pc, void *host_pc);
340
341
/* fop_helper.c */
342
target_ulong cpu_get_fsr(CPUSPARCState *);
343
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
344
index XXXXXXX..XXXXXXX 100644
345
--- a/target/tricore/cpu.h
346
+++ b/target/tricore/cpu.h
347
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)
348
349
void cpu_state_reset(CPUTriCoreState *s);
350
void tricore_tcg_init(void);
351
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
352
+ int *max_insns, vaddr pc, void *host_pc);
353
354
static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
355
uint64_t *cs_base, uint32_t *flags)
356
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
357
index XXXXXXX..XXXXXXX 100644
358
--- a/target/xtensa/cpu.h
359
+++ b/target/xtensa/cpu.h
360
@@ -XXX,XX +XXX,XX @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
361
362
void xtensa_collect_sr_names(const XtensaConfig *config);
363
void xtensa_translate_init(void);
364
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
365
+ int *max_insns, vaddr pc, void *host_pc);
366
void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
367
void xtensa_breakpoint_handler(CPUState *cs);
368
void xtensa_register_core(XtensaConfigList *node);
369
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
370
index XXXXXXX..XXXXXXX 100644
371
--- a/accel/tcg/cpu-exec.c
372
+++ b/accel/tcg/cpu-exec.c
373
@@ -XXX,XX +XXX,XX @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
374
375
if (!tcg_target_initialized) {
376
/* Check mandatory TCGCPUOps handlers */
377
+ const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
378
#ifndef CONFIG_USER_ONLY
379
- assert(cpu->cc->tcg_ops->cpu_exec_halt);
380
- assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
381
+ assert(tcg_ops->cpu_exec_halt);
382
+ assert(tcg_ops->cpu_exec_interrupt);
383
#endif /* !CONFIG_USER_ONLY */
384
- cpu->cc->tcg_ops->initialize();
385
+ assert(tcg_ops->translate_code);
386
+ tcg_ops->initialize();
387
tcg_target_initialized = true;
388
}
389
26
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
390
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
27
index XXXXXXX..XXXXXXX 100644
391
index XXXXXXX..XXXXXXX 100644
28
--- a/accel/tcg/translate-all.c
392
--- a/accel/tcg/translate-all.c
29
+++ b/accel/tcg/translate-all.c
393
+++ b/accel/tcg/translate-all.c
394
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
395
396
tcg_func_start(tcg_ctx);
397
398
- tcg_ctx->cpu = env_cpu(env);
399
- gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
400
+ CPUState *cs = env_cpu(env);
401
+ tcg_ctx->cpu = cs;
402
+ cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);
403
+
404
assert(tb->size != 0);
405
tcg_ctx->cpu = NULL;
406
*max_insns = tb->icount;
30
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
407
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
31
tb->jmp_dest[1] = (uintptr_t)NULL;
408
/*
32
409
* Overflow of code_gen_buffer, or the current slice of it.
33
/* init original jump addresses which have been set during tcg_gen_code() */
410
*
34
- if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
411
- * TODO: We don't need to re-do gen_intermediate_code, nor
35
+ if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
412
+ * TODO: We don't need to re-do tcg_ops->translate_code, nor
36
tb_reset_jump(tb, 0);
413
* should we re-do the tcg optimization currently hidden
37
}
414
* inside tcg_gen_code. All that should be required is to
38
- if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
415
* flush the TBs, allocate a new TB, re-initialize it per
39
+ if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
416
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
40
tb_reset_jump(tb, 1);
417
index XXXXXXX..XXXXXXX 100644
41
}
418
--- a/target/alpha/cpu.c
42
419
+++ b/target/alpha/cpu.c
43
@@ -XXX,XX +XXX,XX @@ static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
420
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {
44
if (tb_page_addr1(tb) != -1) {
421
45
tst->cross_page++;
422
static const TCGCPUOps alpha_tcg_ops = {
46
}
423
.initialize = alpha_translate_init,
47
- if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
424
+ .translate_code = alpha_translate_code,
48
+ if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
425
.synchronize_from_tb = alpha_cpu_synchronize_from_tb,
49
tst->direct_jmp_count++;
426
.restore_state_to_opc = alpha_restore_state_to_opc,
50
- if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
427
51
+ if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
428
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
52
tst->direct_jmp2_count++;
429
index XXXXXXX..XXXXXXX 100644
53
}
430
--- a/target/alpha/translate.c
54
}
431
+++ b/target/alpha/translate.c
55
diff --git a/tcg/tcg.c b/tcg/tcg.c
432
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
56
index XXXXXXX..XXXXXXX 100644
433
.tb_stop = alpha_tr_tb_stop,
57
--- a/tcg/tcg.c
434
};
58
+++ b/tcg/tcg.c
435
59
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
436
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
437
- vaddr pc, void *host_pc)
438
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
439
+ int *max_insns, vaddr pc, void *host_pc)
440
{
441
DisasContext dc;
442
translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
443
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
444
index XXXXXXX..XXXXXXX 100644
445
--- a/target/arm/cpu.c
446
+++ b/target/arm/cpu.c
447
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps arm_sysemu_ops = {
448
#ifdef CONFIG_TCG
449
static const TCGCPUOps arm_tcg_ops = {
450
.initialize = arm_translate_init,
451
+ .translate_code = arm_translate_code,
452
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
453
.debug_excp_handler = arm_debug_excp_handler,
454
.restore_state_to_opc = arm_restore_state_to_opc,
455
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
456
index XXXXXXX..XXXXXXX 100644
457
--- a/target/arm/tcg/cpu-v7m.c
458
+++ b/target/arm/tcg/cpu-v7m.c
459
@@ -XXX,XX +XXX,XX @@ static void cortex_m55_initfn(Object *obj)
460
461
static const TCGCPUOps arm_v7m_tcg_ops = {
462
.initialize = arm_translate_init,
463
+ .translate_code = arm_translate_code,
464
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
465
.debug_excp_handler = arm_debug_excp_handler,
466
.restore_state_to_opc = arm_restore_state_to_opc,
467
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
468
index XXXXXXX..XXXXXXX 100644
469
--- a/target/arm/tcg/translate.c
470
+++ b/target/arm/tcg/translate.c
471
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
472
.tb_stop = arm_tr_tb_stop,
473
};
474
475
-/* generate intermediate code for basic block 'tb'. */
476
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
477
- vaddr pc, void *host_pc)
478
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
479
+ int *max_insns, vaddr pc, void *host_pc)
480
{
481
DisasContext dc = { };
482
const TranslatorOps *ops = &arm_translator_ops;
483
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
484
index XXXXXXX..XXXXXXX 100644
485
--- a/target/avr/cpu.c
486
+++ b/target/avr/cpu.c
487
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {
488
489
static const TCGCPUOps avr_tcg_ops = {
490
.initialize = avr_cpu_tcg_init,
491
+ .translate_code = avr_cpu_translate_code,
492
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
493
.restore_state_to_opc = avr_restore_state_to_opc,
494
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
495
diff --git a/target/avr/translate.c b/target/avr/translate.c
496
index XXXXXXX..XXXXXXX 100644
497
--- a/target/avr/translate.c
498
+++ b/target/avr/translate.c
499
@@ -XXX,XX +XXX,XX @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
500
*
501
* - translate()
502
* - canonicalize_skip()
503
- * - gen_intermediate_code()
504
+ * - translate_code()
505
* - restore_state_to_opc()
506
*
507
*/
508
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
509
.tb_stop = avr_tr_tb_stop,
510
};
511
512
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
513
- vaddr pc, void *host_pc)
514
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
515
+ int *max_insns, vaddr pc, void *host_pc)
516
{
517
DisasContext dc = { };
518
translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
519
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
520
index XXXXXXX..XXXXXXX 100644
521
--- a/target/hexagon/cpu.c
522
+++ b/target/hexagon/cpu.c
523
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)
524
525
static const TCGCPUOps hexagon_tcg_ops = {
526
.initialize = hexagon_translate_init,
527
+ .translate_code = hexagon_translate_code,
528
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
529
.restore_state_to_opc = hexagon_restore_state_to_opc,
530
};
531
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
532
index XXXXXXX..XXXXXXX 100644
533
--- a/target/hexagon/translate.c
534
+++ b/target/hexagon/translate.c
535
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
536
.tb_stop = hexagon_tr_tb_stop,
537
};
538
539
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
540
- vaddr pc, void *host_pc)
541
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
542
+ int *max_insns, vaddr pc, void *host_pc)
543
{
544
DisasContext ctx;
545
546
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
547
index XXXXXXX..XXXXXXX 100644
548
--- a/target/hppa/cpu.c
549
+++ b/target/hppa/cpu.c
550
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {
551
552
static const TCGCPUOps hppa_tcg_ops = {
553
.initialize = hppa_translate_init,
554
+ .translate_code = hppa_translate_code,
555
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
556
.restore_state_to_opc = hppa_restore_state_to_opc,
557
558
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
559
index XXXXXXX..XXXXXXX 100644
560
--- a/target/hppa/translate.c
561
+++ b/target/hppa/translate.c
562
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
60
#endif
563
#endif
61
564
};
62
/* Initialize goto_tb jump offsets. */
565
63
- tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
566
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
64
- tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
567
- vaddr pc, void *host_pc)
65
+ tb->jmp_reset_offset[0] = TB_JMP_OFFSET_INVALID;
568
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
66
+ tb->jmp_reset_offset[1] = TB_JMP_OFFSET_INVALID;
569
+ int *max_insns, vaddr pc, void *host_pc)
67
tcg_ctx->tb_jmp_reset_offset = tb->jmp_reset_offset;
570
{
68
if (TCG_TARGET_HAS_direct_jump) {
571
DisasContext ctx = { };
69
tcg_ctx->tb_jmp_insn_offset = tb->jmp_target_arg;
572
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
573
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
574
index XXXXXXX..XXXXXXX 100644
575
--- a/target/i386/tcg/tcg-cpu.c
576
+++ b/target/i386/tcg/tcg-cpu.c
577
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)
578
579
static const TCGCPUOps x86_tcg_ops = {
580
.initialize = tcg_x86_init,
581
+ .translate_code = x86_translate_code,
582
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
583
.restore_state_to_opc = x86_restore_state_to_opc,
584
.cpu_exec_enter = x86_cpu_exec_enter,
585
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
586
index XXXXXXX..XXXXXXX 100644
587
--- a/target/i386/tcg/translate.c
588
+++ b/target/i386/tcg/translate.c
589
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
590
.tb_stop = i386_tr_tb_stop,
591
};
592
593
-/* generate intermediate code for basic block 'tb'. */
594
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
595
- vaddr pc, void *host_pc)
596
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
597
+ int *max_insns, vaddr pc, void *host_pc)
598
{
599
DisasContext dc;
600
601
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
602
index XXXXXXX..XXXXXXX 100644
603
--- a/target/loongarch/cpu.c
604
+++ b/target/loongarch/cpu.c
605
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
606
607
static const TCGCPUOps loongarch_tcg_ops = {
608
.initialize = loongarch_translate_init,
609
+ .translate_code = loongarch_translate_code,
610
.synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
611
.restore_state_to_opc = loongarch_restore_state_to_opc,
612
613
diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
614
index XXXXXXX..XXXXXXX 100644
615
--- a/target/loongarch/tcg/translate.c
616
+++ b/target/loongarch/tcg/translate.c
617
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
618
.tb_stop = loongarch_tr_tb_stop,
619
};
620
621
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
622
- vaddr pc, void *host_pc)
623
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
624
+ int *max_insns, vaddr pc, void *host_pc)
625
{
626
DisasContext ctx;
627
628
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
629
index XXXXXXX..XXXXXXX 100644
630
--- a/target/m68k/cpu.c
631
+++ b/target/m68k/cpu.c
632
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {
633
634
static const TCGCPUOps m68k_tcg_ops = {
635
.initialize = m68k_tcg_init,
636
+ .translate_code = m68k_translate_code,
637
.restore_state_to_opc = m68k_restore_state_to_opc,
638
639
#ifndef CONFIG_USER_ONLY
640
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
641
index XXXXXXX..XXXXXXX 100644
642
--- a/target/m68k/translate.c
643
+++ b/target/m68k/translate.c
644
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
645
.tb_stop = m68k_tr_tb_stop,
646
};
647
648
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
649
- vaddr pc, void *host_pc)
650
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
651
+ int *max_insns, vaddr pc, void *host_pc)
652
{
653
DisasContext dc;
654
translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
655
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
656
index XXXXXXX..XXXXXXX 100644
657
--- a/target/microblaze/cpu.c
658
+++ b/target/microblaze/cpu.c
659
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {
660
661
static const TCGCPUOps mb_tcg_ops = {
662
.initialize = mb_tcg_init,
663
+ .translate_code = mb_translate_code,
664
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
665
.restore_state_to_opc = mb_restore_state_to_opc,
666
667
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
668
index XXXXXXX..XXXXXXX 100644
669
--- a/target/microblaze/translate.c
670
+++ b/target/microblaze/translate.c
671
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
672
.tb_stop = mb_tr_tb_stop,
673
};
674
675
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
676
- vaddr pc, void *host_pc)
677
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
678
+ int *max_insns, vaddr pc, void *host_pc)
679
{
680
DisasContext dc;
681
translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
682
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
683
index XXXXXXX..XXXXXXX 100644
684
--- a/target/mips/cpu.c
685
+++ b/target/mips/cpu.c
686
@@ -XXX,XX +XXX,XX @@ static const Property mips_cpu_properties[] = {
687
#include "hw/core/tcg-cpu-ops.h"
688
static const TCGCPUOps mips_tcg_ops = {
689
.initialize = mips_tcg_init,
690
+ .translate_code = mips_translate_code,
691
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
692
.restore_state_to_opc = mips_restore_state_to_opc,
693
694
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
695
index XXXXXXX..XXXXXXX 100644
696
--- a/target/mips/tcg/translate.c
697
+++ b/target/mips/tcg/translate.c
698
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
699
.tb_stop = mips_tr_tb_stop,
700
};
701
702
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
703
- vaddr pc, void *host_pc)
704
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
705
+ int *max_insns, vaddr pc, void *host_pc)
706
{
707
DisasContext ctx;
708
709
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
710
index XXXXXXX..XXXXXXX 100644
711
--- a/target/openrisc/cpu.c
712
+++ b/target/openrisc/cpu.c
713
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {
714
715
static const TCGCPUOps openrisc_tcg_ops = {
716
.initialize = openrisc_translate_init,
717
+ .translate_code = openrisc_translate_code,
718
.synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
719
.restore_state_to_opc = openrisc_restore_state_to_opc,
720
721
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
722
index XXXXXXX..XXXXXXX 100644
723
--- a/target/openrisc/translate.c
724
+++ b/target/openrisc/translate.c
725
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
726
.tb_stop = openrisc_tr_tb_stop,
727
};
728
729
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
730
- vaddr pc, void *host_pc)
731
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
732
+ int *max_insns, vaddr pc, void *host_pc)
733
{
734
DisasContext ctx;
735
736
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
737
index XXXXXXX..XXXXXXX 100644
738
--- a/target/ppc/cpu_init.c
739
+++ b/target/ppc/cpu_init.c
740
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {
741
742
static const TCGCPUOps ppc_tcg_ops = {
743
.initialize = ppc_translate_init,
744
+ .translate_code = ppc_translate_code,
745
.restore_state_to_opc = ppc_restore_state_to_opc,
746
747
#ifdef CONFIG_USER_ONLY
748
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
749
index XXXXXXX..XXXXXXX 100644
750
--- a/target/ppc/translate.c
751
+++ b/target/ppc/translate.c
752
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
753
.tb_stop = ppc_tr_tb_stop,
754
};
755
756
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
757
- vaddr pc, void *host_pc)
758
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
759
+ int *max_insns, vaddr pc, void *host_pc)
760
{
761
DisasContext ctx;
762
763
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
764
index XXXXXXX..XXXXXXX 100644
765
--- a/target/riscv/tcg/tcg-cpu.c
766
+++ b/target/riscv/tcg/tcg-cpu.c
767
@@ -XXX,XX +XXX,XX @@ static void riscv_restore_state_to_opc(CPUState *cs,
768
769
static const TCGCPUOps riscv_tcg_ops = {
770
.initialize = riscv_translate_init,
771
+ .translate_code = riscv_translate_code,
772
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
773
.restore_state_to_opc = riscv_restore_state_to_opc,
774
775
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
776
index XXXXXXX..XXXXXXX 100644
777
--- a/target/riscv/translate.c
778
+++ b/target/riscv/translate.c
779
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
780
.tb_stop = riscv_tr_tb_stop,
781
};
782
783
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
784
- vaddr pc, void *host_pc)
785
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
786
+ int *max_insns, vaddr pc, void *host_pc)
787
{
788
DisasContext ctx;
789
790
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
791
index XXXXXXX..XXXXXXX 100644
792
--- a/target/rx/cpu.c
793
+++ b/target/rx/cpu.c
794
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {
795
796
static const TCGCPUOps rx_tcg_ops = {
797
.initialize = rx_translate_init,
798
+ .translate_code = rx_translate_code,
799
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
800
.restore_state_to_opc = rx_restore_state_to_opc,
801
.tlb_fill = rx_cpu_tlb_fill,
802
diff --git a/target/rx/translate.c b/target/rx/translate.c
803
index XXXXXXX..XXXXXXX 100644
804
--- a/target/rx/translate.c
805
+++ b/target/rx/translate.c
806
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
807
.tb_stop = rx_tr_tb_stop,
808
};
809
810
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
811
- vaddr pc, void *host_pc)
812
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
813
+ int *max_insns, vaddr pc, void *host_pc)
814
{
815
DisasContext dc;
816
817
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
818
index XXXXXXX..XXXXXXX 100644
819
--- a/target/s390x/cpu.c
820
+++ b/target/s390x/cpu.c
821
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
822
823
static const TCGCPUOps s390_tcg_ops = {
824
.initialize = s390x_translate_init,
825
+ .translate_code = s390x_translate_code,
826
.restore_state_to_opc = s390x_restore_state_to_opc,
827
828
#ifdef CONFIG_USER_ONLY
829
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
830
index XXXXXXX..XXXXXXX 100644
831
--- a/target/s390x/tcg/translate.c
832
+++ b/target/s390x/tcg/translate.c
833
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
834
.disas_log = s390x_tr_disas_log,
835
};
836
837
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
838
- vaddr pc, void *host_pc)
839
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
840
+ int *max_insns, vaddr pc, void *host_pc)
841
{
842
DisasContext dc;
843
844
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
845
index XXXXXXX..XXXXXXX 100644
846
--- a/target/sh4/cpu.c
847
+++ b/target/sh4/cpu.c
848
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
849
850
static const TCGCPUOps superh_tcg_ops = {
851
.initialize = sh4_translate_init,
852
+ .translate_code = sh4_translate_code,
853
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
854
.restore_state_to_opc = superh_restore_state_to_opc,
855
856
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
857
index XXXXXXX..XXXXXXX 100644
858
--- a/target/sh4/translate.c
859
+++ b/target/sh4/translate.c
860
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
861
.tb_stop = sh4_tr_tb_stop,
862
};
863
864
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
865
- vaddr pc, void *host_pc)
866
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
867
+ int *max_insns, vaddr pc, void *host_pc)
868
{
869
DisasContext ctx;
870
871
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
872
index XXXXXXX..XXXXXXX 100644
873
--- a/target/sparc/cpu.c
874
+++ b/target/sparc/cpu.c
875
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
876
877
static const TCGCPUOps sparc_tcg_ops = {
878
.initialize = sparc_tcg_init,
879
+ .translate_code = sparc_translate_code,
880
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
881
.restore_state_to_opc = sparc_restore_state_to_opc,
882
883
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
884
index XXXXXXX..XXXXXXX 100644
885
--- a/target/sparc/translate.c
886
+++ b/target/sparc/translate.c
887
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
888
.tb_stop = sparc_tr_tb_stop,
889
};
890
891
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
892
- vaddr pc, void *host_pc)
893
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
894
+ int *max_insns, vaddr pc, void *host_pc)
895
{
896
DisasContext dc = {};
897
898
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
899
index XXXXXXX..XXXXXXX 100644
900
--- a/target/tricore/cpu.c
901
+++ b/target/tricore/cpu.c
902
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {
903
904
static const TCGCPUOps tricore_tcg_ops = {
905
.initialize = tricore_tcg_init,
906
+ .translate_code = tricore_translate_code,
907
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
908
.restore_state_to_opc = tricore_restore_state_to_opc,
909
.tlb_fill = tricore_cpu_tlb_fill,
910
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
911
index XXXXXXX..XXXXXXX 100644
912
--- a/target/tricore/translate.c
913
+++ b/target/tricore/translate.c
914
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
915
.tb_stop = tricore_tr_tb_stop,
916
};
917
918
-
919
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
920
- vaddr pc, void *host_pc)
921
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
922
+ int *max_insns, vaddr pc, void *host_pc)
923
{
924
DisasContext ctx;
925
translator_loop(cs, tb, max_insns, pc, host_pc,
926
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
927
index XXXXXXX..XXXXXXX 100644
928
--- a/target/xtensa/cpu.c
929
+++ b/target/xtensa/cpu.c
930
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
931
932
static const TCGCPUOps xtensa_tcg_ops = {
933
.initialize = xtensa_translate_init,
934
+ .translate_code = xtensa_translate_code,
935
.debug_excp_handler = xtensa_breakpoint_handler,
936
.restore_state_to_opc = xtensa_restore_state_to_opc,
937
938
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
939
index XXXXXXX..XXXXXXX 100644
940
--- a/target/xtensa/translate.c
941
+++ b/target/xtensa/translate.c
942
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
943
.tb_stop = xtensa_tr_tb_stop,
944
};
945
946
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
947
- vaddr pc, void *host_pc)
948
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
949
+ int *max_insns, vaddr pc, void *host_pc)
950
{
951
DisasContext dc = {};
952
translator_loop(cpu, tb, max_insns, pc, host_pc,
70
--
953
--
71
2.34.1
954
2.43.0
72
955
73
956