The following changes since commit aa3a285b5bc56a4208b3b57d4a55291e9c260107:

  Merge tag 'mem-2024-12-21' of https://github.com/davidhildenbrand/qemu into staging (2024-12-22 14:33:27 -0500)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20241224

for you to fetch changes up to e4a8e093dc74be049f4829831dce76e5edab0003:

  accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core (2024-12-24 08:32:15 -0800)

----------------------------------------------------------------
tcg/optimize: Remove in-flight mask data from OptContext
fpu: Add float*_muladd_scalbn
fpu: Remove float_muladd_halve_result
fpu: Add float_round_nearest_even_max
fpu: Add float_muladd_suppress_add_product_zero
target/hexagon: Use float32_muladd
accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

----------------------------------------------------------------
Ilya Leoshkevich (1):
      tests/tcg: Do not use inttypes.h in multiarch/system/memory.c

Pierrick Bouvier (1):
      plugins: optimize cpu_index code generation

Richard Henderson (70):
      tcg/optimize: Split out finish_bb, finish_ebb
      tcg/optimize: Split out fold_affected_mask
      tcg/optimize: Copy mask writeback to fold_masks
      tcg/optimize: Split out fold_masks_zs
      tcg/optimize: Augment s_mask from z_mask in fold_masks_zs
      tcg/optimize: Change representation of s_mask
      tcg/optimize: Use finish_folding in fold_add, fold_add_vec, fold_addsub2
      tcg/optimize: Introduce const value accessors for TempOptInfo
      tcg/optimize: Use fold_masks_zs in fold_and
      tcg/optimize: Use fold_masks_zs in fold_andc
      tcg/optimize: Use fold_masks_zs in fold_bswap
      tcg/optimize: Use fold_masks_zs in fold_count_zeros
      tcg/optimize: Use fold_masks_z in fold_ctpop
      tcg/optimize: Use fold_and and fold_masks_z in fold_deposit
      tcg/optimize: Compute sign mask in fold_deposit
      tcg/optimize: Use finish_folding in fold_divide
      tcg/optimize: Use finish_folding in fold_dup, fold_dup2
      tcg/optimize: Use fold_masks_s in fold_eqv
      tcg/optimize: Use fold_masks_z in fold_extract
      tcg/optimize: Use finish_folding in fold_extract2
      tcg/optimize: Use fold_masks_zs in fold_exts
      tcg/optimize: Use fold_masks_z in fold_extu
      tcg/optimize: Use fold_masks_zs in fold_movcond
      tcg/optimize: Use finish_folding in fold_mul*
      tcg/optimize: Use fold_masks_s in fold_nand
      tcg/optimize: Use fold_masks_z in fold_neg_no_const
      tcg/optimize: Use fold_masks_s in fold_nor
      tcg/optimize: Use fold_masks_s in fold_not
      tcg/optimize: Use fold_masks_zs in fold_or
      tcg/optimize: Use fold_masks_zs in fold_orc
      tcg/optimize: Use fold_masks_zs in fold_qemu_ld
      tcg/optimize: Return true from fold_qemu_st, fold_tcg_st
      tcg/optimize: Use finish_folding in fold_remainder
      tcg/optimize: Distinguish simplification in fold_setcond_zmask
      tcg/optimize: Use fold_masks_z in fold_setcond
      tcg/optimize: Use fold_masks_s in fold_negsetcond
      tcg/optimize: Use fold_masks_z in fold_setcond2
      tcg/optimize: Use finish_folding in fold_cmp_vec
      tcg/optimize: Use finish_folding in fold_cmpsel_vec
      tcg/optimize: Use fold_masks_zs in fold_sextract
      tcg/optimize: Use fold_masks_zs, fold_masks_s in fold_shift
      tcg/optimize: Simplify sign bit test in fold_shift
      tcg/optimize: Use finish_folding in fold_sub, fold_sub_vec
      tcg/optimize: Use fold_masks_zs in fold_tcg_ld
      tcg/optimize: Use finish_folding in fold_tcg_ld_memcopy
      tcg/optimize: Use fold_masks_zs in fold_xor
      tcg/optimize: Use finish_folding in fold_bitsel_vec
      tcg/optimize: Use finish_folding as default in tcg_optimize
      tcg/optimize: Remove z_mask, s_mask from OptContext
      tcg/optimize: Re-enable sign-mask optimizations
      tcg/optimize: Move fold_bitsel_vec into alphabetic sort
      tcg/optimize: Move fold_cmp_vec, fold_cmpsel_vec into alphabetic sort
      softfloat: Add float{16,32,64}_muladd_scalbn
      target/arm: Use float*_muladd_scalbn
      target/sparc: Use float*_muladd_scalbn
      softfloat: Remove float_muladd_halve_result
      softfloat: Add float_round_nearest_even_max
      softfloat: Add float_muladd_suppress_add_product_zero
      target/hexagon: Use float32_mul in helper_sfmpy
      target/hexagon: Use float32_muladd for helper_sffma
      target/hexagon: Use float32_muladd for helper_sffms
      target/hexagon: Use float32_muladd_scalbn for helper_sffma_sc
      target/hexagon: Use float32_muladd for helper_sffm[as]_lib
      target/hexagon: Remove internal_fmafx
      target/hexagon: Expand GEN_XF_ROUND
      target/hexagon: Remove Float
      target/hexagon: Remove Double
      target/hexagon: Use mulu64 for int128_mul_6464
      target/hexagon: Simplify internal_mpyhh setup
      accel/tcg: Move gen_intermediate_code to TCGCPUOps.translate_core

 include/exec/translator.h           |  14 -
 include/fpu/softfloat-types.h       |   2 +
 include/fpu/softfloat.h             |  14 +-
 include/hw/core/tcg-cpu-ops.h       |  13 +
 target/alpha/cpu.h                  |   2 +
 target/arm/internals.h              |   2 +
 target/avr/cpu.h                    |   2 +
 target/hexagon/cpu.h                |   2 +
 target/hexagon/fma_emu.h            |   3 -
 target/hppa/cpu.h                   |   2 +
 target/i386/tcg/helper-tcg.h        |   2 +
 target/loongarch/internals.h        |   2 +
 target/m68k/cpu.h                   |   2 +
 target/microblaze/cpu.h             |   2 +
 target/mips/tcg/tcg-internal.h      |   2 +
 target/openrisc/cpu.h               |   2 +
 target/ppc/cpu.h                    |   2 +
 target/riscv/cpu.h                  |   3 +
 target/rx/cpu.h                     |   2 +
 target/s390x/s390x-internal.h       |   2 +
 target/sh4/cpu.h                    |   2 +
 target/sparc/cpu.h                  |   2 +
 target/sparc/helper.h               |   4 +-
 target/tricore/cpu.h                |   2 +
 target/xtensa/cpu.h                 |   2 +
 accel/tcg/cpu-exec.c                |   8 +-
 accel/tcg/plugin-gen.c              |   9 +
 accel/tcg/translate-all.c           |   8 +-
 fpu/softfloat.c                     |  63 +--
 target/alpha/cpu.c                  |   1 +
 target/alpha/translate.c            |   4 +-
 target/arm/cpu.c                    |   1 +
 target/arm/tcg/cpu-v7m.c            |   1 +
 target/arm/tcg/helper-a64.c         |   6 +-
 target/arm/tcg/translate.c          |   5 +-
 target/avr/cpu.c                    |   1 +
 target/avr/translate.c              |   6 +-
 target/hexagon/cpu.c                |   1 +
 target/hexagon/fma_emu.c            | 496 ++++++---------------
 target/hexagon/op_helper.c          | 125 ++----
 target/hexagon/translate.c          |   4 +-
 target/hppa/cpu.c                   |   1 +
 target/hppa/translate.c             |   4 +-
 target/i386/tcg/tcg-cpu.c           |   1 +
 target/i386/tcg/translate.c         |   5 +-
 target/loongarch/cpu.c              |   1 +
 target/loongarch/tcg/translate.c    |   4 +-
 target/m68k/cpu.c                   |   1 +
 target/m68k/translate.c             |   4 +-
 target/microblaze/cpu.c             |   1 +
 target/microblaze/translate.c       |   4 +-
 target/mips/cpu.c                   |   1 +
 target/mips/tcg/translate.c         |   4 +-
 target/openrisc/cpu.c               |   1 +
 target/openrisc/translate.c         |   4 +-
 target/ppc/cpu_init.c               |   1 +
 target/ppc/translate.c              |   4 +-
 target/riscv/tcg/tcg-cpu.c          |   1 +
 target/riscv/translate.c            |   4 +-
 target/rx/cpu.c                     |   1 +
 target/rx/translate.c               |   4 +-
 target/s390x/cpu.c                  |   1 +
 target/s390x/tcg/translate.c        |   4 +-
 target/sh4/cpu.c                    |   1 +
 target/sh4/translate.c              |   4 +-
 target/sparc/cpu.c                  |   1 +
 target/sparc/fop_helper.c           |   8 +-
 target/sparc/translate.c            |  84 ++--
 target/tricore/cpu.c                |   1 +
 target/tricore/translate.c          |   5 +-
 target/xtensa/cpu.c                 |   1 +
 target/xtensa/translate.c           |   4 +-
 tcg/optimize.c                      | 857 +++++++++++++++++++-----------------
 tests/tcg/multiarch/system/memory.c |   9 +-
 fpu/softfloat-parts.c.inc           |  16 +-
 75 files changed, 866 insertions(+), 1009 deletions(-)
From: Ilya Leoshkevich <iii@linux.ibm.com>

make check-tcg fails on Fedora with the following error message:

  alpha-linux-gnu-gcc [...] qemu/tests/tcg/multiarch/system/memory.c -o memory [...]
  qemu/tests/tcg/multiarch/system/memory.c:17:10: fatal error: inttypes.h: No such file or directory
     17 | #include <inttypes.h>
        |          ^~~~~~~~~~~~
  compilation terminated.

The reason is that Fedora has cross-compilers, but no cross-glibc
headers. Fix by hardcoding the format specifiers and dropping the
include.

An alternative fix would be to introduce a configure check for
inttypes.h. But this would make it impossible to use Fedora
cross-compilers for softmmu tests, which used to work so far.

Fixes: ecbcc9ead2f8 ("tests/tcg: add a system test to check memory instrumentation")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010085906.226249-1-iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tests/tcg/multiarch/system/memory.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/tcg/multiarch/system/memory.c b/tests/tcg/multiarch/system/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/multiarch/system/memory.c
+++ b/tests/tcg/multiarch/system/memory.c
@@ -XXX,XX +XXX,XX @@
 
 #include <stdint.h>
 #include <stdbool.h>
-#include <inttypes.h>
 #include <minilib.h>
 
 #ifndef CHECK_UNALIGNED
@@ -XXX,XX +XXX,XX @@ int main(void)
     int i;
     bool ok = true;
 
-    ml_printf("Test data start: 0x%"PRIxPTR"\n", &test_data[0]);
-    ml_printf("Test data end: 0x%"PRIxPTR"\n", &test_data[TEST_SIZE]);
+    ml_printf("Test data start: 0x%lx\n", (unsigned long)&test_data[0]);
+    ml_printf("Test data end: 0x%lx\n", (unsigned long)&test_data[TEST_SIZE]);
 
     /* Run through the unsigned tests first */
     for (i = 0; i < ARRAY_SIZE(init_ufns) && ok; i++) {
@@ -XXX,XX +XXX,XX @@ int main(void)
         ok = do_signed_reads(true);
     }
 
-    ml_printf("Test data read: %"PRId32"\n", test_read_count);
-    ml_printf("Test data write: %"PRId32"\n", test_write_count);
+    ml_printf("Test data read: %lu\n", (unsigned long)test_read_count);
+    ml_printf("Test data write: %lu\n", (unsigned long)test_write_count);
     ml_printf("Test complete: %s\n", ok ? "PASSED" : "FAILED");
     return ok ? 0 : -1;
 }
--
2.43.0
From: Pierrick Bouvier <pierrick.bouvier@linaro.org>

When running with a single vcpu, we can return a constant instead of a
load when accessing cpu_index.
A side effect is that all tcg operations using it are optimized, most
notably scoreboard access.
When running a simple loop in user-mode, the speedup is around 20%.

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20241128213843.1023080-1-pierrick.bouvier@linaro.org>
---
 accel/tcg/plugin-gen.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -XXX,XX +XXX,XX @@ static void gen_disable_mem_helper(void)
 
 static TCGv_i32 gen_cpu_index(void)
 {
+    /*
+     * Optimize when we run with a single vcpu. All values using cpu_index,
+     * including scoreboard index, will be optimized out.
+     * User-mode calls tb_flush when setting this flag. In system-mode, all
+     * vcpus are created before generating code.
+     */
+    if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) {
+        return tcg_constant_i32(current_cpu->cpu_index);
+    }
     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
     tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
--
2.43.0
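The constant-vs-load distinction above matters because every plugin
scoreboard access is an address computed from cpu_index. A standalone C
sketch of that addressing (illustrative only; the helper name and
parameters are hypothetical, not QEMU API):

    #include <stddef.h>

    /*
     * A per-vcpu scoreboard slot lives at base + cpu_index * stride.
     * With CF_PARALLEL clear there is exactly one vcpu, so cpu_index is
     * a translation-time constant and the load + multiply + add chain
     * folds down to a single constant address.
     */
    static inline void *scoreboard_slot(void *base, size_t stride,
                                        unsigned int cpu_index)
    {
        return (char *)base + (size_t)cpu_index * stride;
    }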
Call them directly from the opcode switch statement in tcg_optimize,
rather than in finish_folding based on opcode flags.  Adjust folding
of conditional branches to match.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 47 +++++++++++++++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
     }
 }
 
+static void finish_bb(OptContext *ctx)
+{
+    /* We only optimize memory barriers across basic blocks. */
+    ctx->prev_mb = NULL;
+}
+
+static void finish_ebb(OptContext *ctx)
+{
+    finish_bb(ctx);
+    /* We only optimize across extended basic blocks. */
+    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
+    remove_mem_copy_all(ctx);
+}
+
 static void finish_folding(OptContext *ctx, TCGOp *op)
 {
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     int i, nb_oargs;
 
-    /*
-     * We only optimize extended basic blocks.  If the opcode ends a BB
-     * and is not a conditional branch, reset all temp data.
-     */
-    if (def->flags & TCG_OPF_BB_END) {
-        ctx->prev_mb = NULL;
-        if (!(def->flags & TCG_OPF_COND_BRANCH)) {
-            memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
-            remove_mem_copy_all(ctx);
-        }
-        return;
-    }
-
     nb_oargs = def->nb_oargs;
     for (i = 0; i < nb_oargs; i++) {
         TCGTemp *ts = arg_temp(op->args[i]);
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
     if (i > 0) {
         op->opc = INDEX_op_br;
         op->args[0] = op->args[3];
+        finish_ebb(ctx);
+    } else {
+        finish_bb(ctx);
     }
-    return false;
+    return true;
 }
 
 static bool fold_brcond2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
     }
     op->opc = INDEX_op_br;
     op->args[0] = label;
-    break;
+    finish_ebb(ctx);
+    return true;
     }
-    return false;
+
+    finish_bb(ctx);
+    return true;
 }
 
 static bool fold_bswap(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64_VEC(xor):
             done = fold_xor(&ctx, op);
             break;
+        case INDEX_op_set_label:
+        case INDEX_op_br:
+        case INDEX_op_exit_tb:
+        case INDEX_op_goto_tb:
+        case INDEX_op_goto_ptr:
+            finish_ebb(&ctx);
+            done = true;
+            break;
         default:
             break;
         }
--
2.43.0
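For readers following the control flow: the hunks above move the reset
decision out of finish_folding and into explicit call sites. A
simplified sketch of the resulting dispatch (the function name is
hypothetical; in the patch itself the unconditional opcodes are handled
in the tcg_optimize switch, and fold_brcond picks finish_ebb only when
the branch folds to an unconditional br):

    static void finish_block_example(OptContext *ctx, TCGOpcode opc)
    {
        switch (opc) {
        case INDEX_op_set_label:
        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_tb:
        case INDEX_op_goto_ptr:
            /* Control leaves the extended basic block: drop temp data. */
            finish_ebb(ctx);
            break;
        case INDEX_op_brcond_i32:
        case INDEX_op_brcond_i64:
            /* Fall-through stays within the EBB: reset only prev_mb. */
            finish_bb(ctx);
            break;
        default:
            break;
        }
    }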
There are only a few logical operations which can compute
an "affected" mask.  Split out handling of this optimization
to a separate function, only to be called when applicable.

Remove the a_mask field from OptContext, as the mask is
no longer stored anywhere.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;
 
     /* In flight values from optimization. */
-    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
     uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
     uint64_t s_mask;  /* mask of clrsb(value) bits */
     TCGType type;
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
 
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
-    uint64_t a_mask = ctx->a_mask;
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
 
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
      * type changing opcodes.
      */
     if (ctx->type == TCG_TYPE_I32) {
-        a_mask = (int32_t)a_mask;
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
         ctx->z_mask = z_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
+    return false;
+}
+
+/*
+ * An "affected" mask bit is 0 if and only if the result is identical
+ * to the first input.  Thus if the entire mask is 0, the operation
+ * is equivalent to a copy.
+ */
+static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
+{
+    if (ctx->type == TCG_TYPE_I32) {
+        a_mask = (uint32_t)a_mask;
+    }
     if (a_mask == 0) {
         return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
     }
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
      */
-    if (arg_is_const(op->args[2])) {
-        ctx->a_mask = z1 & ~z2;
+    if (arg_is_const(op->args[2]) &&
+        fold_affected_mask(ctx, op, z1 & ~z2)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
      */
     if (arg_is_const(op->args[2])) {
         uint64_t z2 = ~arg_info(op->args[2])->z_mask;
-        ctx->a_mask = z1 & ~z2;
+        if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+            return true;
+        }
         z1 &= z2;
     }
     ctx->z_mask = z1;
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
 
     z_mask_old = arg_info(op->args[1])->z_mask;
     z_mask = extract64(z_mask_old, pos, len);
-    if (pos == 0) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = s_mask;
-    if (!type_change) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
 
     ctx->z_mask = z_mask;
     ctx->s_mask = smask_from_zmask(z_mask);
-    if (!type_change) {
-        ctx->a_mask = z_mask_old ^ z_mask;
+    if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
+        return true;
     }
     return fold_masks(ctx, op);
 }
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask |= MAKE_64BIT_MASK(len, 64 - len);
     ctx->s_mask = s_mask;
 
-    if (pos == 0) {
-        ctx->a_mask = s_mask & ~s_mask_old;
+    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+        return true;
     }
 
     return fold_masks(ctx, op);
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
     }
 
     /* Assume all bits affected, no bits known zero, no sign reps. */
-    ctx.a_mask = -1;
     ctx.z_mask = -1;
     ctx.s_mask = 0;
 
--
2.43.0
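A worked instance of the affected-mask arithmetic used in fold_and
above: z1 marks the bits of arg1 that may be nonzero, z2 those of the
constant arg2, so a result bit can differ from arg1 only where arg1 may
be 1 and arg2 is 0. A standalone sketch (not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* "x & 0xffff" where x is known to fit in its low 8 bits */
        uint64_t z1 = 0x00ff;        /* bits of arg1 that may be 1 */
        uint64_t z2 = 0xffff;        /* z_mask of the constant arg2 */
        uint64_t a_mask = z1 & ~z2;  /* bits the AND could change */

        /* a_mask == 0: the AND cannot change arg1, so fold to a copy. */
        printf("a_mask = 0x%llx -> %s\n", (unsigned long long)a_mask,
               a_mask == 0 ? "fold to mov" : "keep the and");
        return 0;
    }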
Use of fold_masks should be restricted to those opcodes that
can reliably make use of it -- those with a single output,
and from higher-level folders that set up the masks.
Prepare for conversion of each folder in turn.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask = ctx->z_mask;
     uint64_t s_mask = ctx->s_mask;
+    const TCGOpDef *def = &tcg_op_defs[op->opc];
+    TCGTemp *ts;
+    TempOptInfo *ti;
+
+    /* Only single-output opcodes are supported here. */
+    tcg_debug_assert(def->nb_oargs == 1);
 
     /*
      * 32-bit ops generate 32-bit results, which for the purpose of
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     if (ctx->type == TCG_TYPE_I32) {
         z_mask = (int32_t)z_mask;
         s_mask |= MAKE_64BIT_MASK(32, 32);
-        ctx->z_mask = z_mask;
-        ctx->s_mask = s_mask;
     }
 
     if (z_mask == 0) {
         return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
     }
-    return false;
+
+    ts = arg_temp(op->args[0]);
+    reset_ts(ctx, ts);
+
+    ti = ts_info(ts);
+    ti->z_mask = z_mask;
+    ti->s_mask = s_mask;
+    return true;
 }
 
 /*
--
2.43.0
Add a routine to which masks can be passed directly, rather than
storing them into OptContext.  To be used in upcoming patches.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
-static bool fold_masks(OptContext *ctx, TCGOp *op)
+/*
+ * Record "zero" and "sign" masks for the single output of @op.
+ * See TempOptInfo definition of z_mask and s_mask.
+ * If z_mask allows, fold the output to constant zero.
+ */
+static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
+                          uint64_t z_mask, uint64_t s_mask)
 {
-    uint64_t z_mask = ctx->z_mask;
-    uint64_t s_mask = ctx->s_mask;
     const TCGOpDef *def = &tcg_op_defs[op->opc];
     TCGTemp *ts;
     TempOptInfo *ti;
@@ -XXX,XX +XXX,XX @@ static bool fold_masks(OptContext *ctx, TCGOp *op)
     return true;
 }
 
+static bool fold_masks(OptContext *ctx, TCGOp *op)
+{
+    return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
+}
+
 /*
  * An "affected" mask bit is 0 if and only if the result is identical
  * to the first input.  Thus if the entire mask is 0, the operation
--
2.43.0
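Usage note: with fold_masks_zs() in place, a folder can hand over
locally computed masks instead of staging them in OptContext first,
which is what the following patches do one opcode at a time. A
hypothetical conversion, as a sketch (this is not one of the real
folders):

    /*
     * Folding a 16-bit zero-extension: only the low 16 bits of the
     * result may be nonzero; pass s_mask = 0 since no sign-bit
     * repetitions are being claimed here.
     */
    static bool fold_ext16u_example(OptContext *ctx, TCGOp *op)
    {
        uint64_t z_mask = arg_info(op->args[1])->z_mask & 0xffff;
        return fold_masks_zs(ctx, op, z_mask, 0);
    }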
1
From: Claudio Fontana <cfontana@suse.de>
1
Consider the passed s_mask to be a minimum deduced from
2
either existing s_mask or from a sign-extension operation.
3
We may be able to deduce more from the set of known zeros.
4
Remove identical logic from several opcode folders.
2
5
3
Signed-off-by: Claudio Fontana <cfontana@suse.de>
6
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-Id: <20210204163931.7358-10-cfontana@suse.de>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
8
---
9
include/hw/core/cpu.h | 4 ++--
9
tcg/optimize.c | 21 ++++++---------------
10
accel/tcg/cpu-exec.c | 4 ++--
10
1 file changed, 6 insertions(+), 15 deletions(-)
11
target/alpha/cpu.c | 2 +-
12
target/arm/cpu.c | 4 ++--
13
target/arm/cpu_tcg.c | 9 ++++-----
14
target/avr/cpu.c | 2 +-
15
target/avr/helper.c | 4 ++--
16
target/cris/cpu.c | 12 ++++++------
17
target/cris/helper.c | 4 ++--
18
target/hppa/cpu.c | 2 +-
19
target/i386/tcg/tcg-cpu.c | 2 +-
20
target/lm32/cpu.c | 2 +-
21
target/m68k/cpu.c | 2 +-
22
target/microblaze/cpu.c | 2 +-
23
target/mips/cpu.c | 4 ++--
24
target/moxie/cpu.c | 2 +-
25
target/nios2/cpu.c | 2 +-
26
target/openrisc/cpu.c | 2 +-
27
target/riscv/cpu.c | 2 +-
28
target/rx/cpu.c | 2 +-
29
target/s390x/cpu.c | 2 +-
30
target/sh4/cpu.c | 2 +-
31
target/sparc/cpu.c | 2 +-
32
target/tilegx/cpu.c | 2 +-
33
target/unicore32/cpu.c | 2 +-
34
target/xtensa/cpu.c | 2 +-
35
target/ppc/translate_init.c.inc | 2 +-
36
27 files changed, 41 insertions(+), 42 deletions(-)
37
11
38
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
12
diff --git a/tcg/optimize.c b/tcg/optimize.c
39
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
40
--- a/include/hw/core/cpu.h
14
--- a/tcg/optimize.c
41
+++ b/include/hw/core/cpu.h
15
+++ b/tcg/optimize.c
42
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
16
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
43
void (*cpu_exec_exit)(CPUState *cpu);
17
* Record "zero" and "sign" masks for the single output of @op.
44
/** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
18
* See TempOptInfo definition of z_mask and s_mask.
45
bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
19
* If z_mask allows, fold the output to constant zero.
46
+ /** @do_interrupt: Callback for interrupt handling. */
20
+ * The passed s_mask may be augmented by z_mask.
47
+ void (*do_interrupt)(CPUState *cpu);
21
*/
48
/**
22
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
49
* @tlb_fill: Handle a softmmu tlb miss or user-only address fault
23
uint64_t z_mask, uint64_t s_mask)
50
*
24
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
51
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
25
52
* @parse_features: Callback to parse command line arguments.
26
ti = ts_info(ts);
53
* @reset_dump_flags: #CPUDumpFlags to use for reset logging.
27
ti->z_mask = z_mask;
54
* @has_work: Callback for checking if there is work to do.
28
- ti->s_mask = s_mask;
55
- * @do_interrupt: Callback for interrupt handling.
29
+ ti->s_mask = s_mask | smask_from_zmask(z_mask);
56
* @do_unaligned_access: Callback for unaligned access handling, if
57
* the target defines #TARGET_ALIGNED_ONLY.
58
* @do_transaction_failed: Callback for handling failed memory transactions
59
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
60
61
int reset_dump_flags;
62
bool (*has_work)(CPUState *cpu);
63
- void (*do_interrupt)(CPUState *cpu);
64
void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
65
MMUAccessType access_type,
66
int mmu_idx, uintptr_t retaddr);
67
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
68
index XXXXXXX..XXXXXXX 100644
69
--- a/accel/tcg/cpu-exec.c
70
+++ b/accel/tcg/cpu-exec.c
71
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
72
loop */
73
#if defined(TARGET_I386)
74
CPUClass *cc = CPU_GET_CLASS(cpu);
75
- cc->do_interrupt(cpu);
76
+ cc->tcg_ops.do_interrupt(cpu);
77
#endif
78
*ret = cpu->exception_index;
79
cpu->exception_index = -1;
80
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
81
if (replay_exception()) {
82
CPUClass *cc = CPU_GET_CLASS(cpu);
83
qemu_mutex_lock_iothread();
84
- cc->do_interrupt(cpu);
85
+ cc->tcg_ops.do_interrupt(cpu);
86
qemu_mutex_unlock_iothread();
87
cpu->exception_index = -1;
88
89
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
90
index XXXXXXX..XXXXXXX 100644
91
--- a/target/alpha/cpu.c
92
+++ b/target/alpha/cpu.c
93
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
94
95
cc->class_by_name = alpha_cpu_class_by_name;
96
cc->has_work = alpha_cpu_has_work;
97
- cc->do_interrupt = alpha_cpu_do_interrupt;
98
+ cc->tcg_ops.do_interrupt = alpha_cpu_do_interrupt;
99
cc->tcg_ops.cpu_exec_interrupt = alpha_cpu_exec_interrupt;
100
cc->dump_state = alpha_cpu_dump_state;
101
cc->set_pc = alpha_cpu_set_pc;
102
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
103
index XXXXXXX..XXXXXXX 100644
104
--- a/target/arm/cpu.c
105
+++ b/target/arm/cpu.c
106
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
107
found:
108
cs->exception_index = excp_idx;
109
env->exception.target_el = target_el;
110
- cc->do_interrupt(cs);
111
+ cc->tcg_ops.do_interrupt(cs);
112
return true;
30
return true;
113
}
31
}
114
32
115
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
33
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
116
cc->gdb_read_register = arm_cpu_gdb_read_register;
34
default:
117
cc->gdb_write_register = arm_cpu_gdb_write_register;
35
g_assert_not_reached();
118
#ifndef CONFIG_USER_ONLY
36
}
119
- cc->do_interrupt = arm_cpu_do_interrupt;
37
- s_mask = smask_from_zmask(z_mask);
120
cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
38
121
cc->asidx_from_attrs = arm_asidx_from_attrs;
39
+ s_mask = 0;
122
cc->vmsd = &vmstate_arm_cpu;
40
switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
123
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
41
case TCG_BSWAP_OZ:
124
#if !defined(CONFIG_USER_ONLY)
42
break;
125
cc->do_transaction_failed = arm_cpu_do_transaction_failed;
43
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
126
cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
44
default:
127
+ cc->tcg_ops.do_interrupt = arm_cpu_do_interrupt;
45
/* The high bits are undefined: force all bits above the sign to 1. */
128
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
46
z_mask |= sign << 1;
129
#endif
47
- s_mask = 0;
48
break;
49
}
50
ctx->z_mask = z_mask;
51
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
52
g_assert_not_reached();
53
}
54
ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
55
- ctx->s_mask = smask_from_zmask(ctx->z_mask);
56
return false;
130
}
57
}
131
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
58
132
index XXXXXXX..XXXXXXX 100644
59
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
133
--- a/target/arm/cpu_tcg.c
60
default:
134
+++ b/target/arm/cpu_tcg.c
61
g_assert_not_reached();
135
@@ -XXX,XX +XXX,XX @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
136
if (interrupt_request & CPU_INTERRUPT_HARD
137
&& (armv7m_nvic_can_take_pending_exception(env->nvic))) {
138
cs->exception_index = EXCP_IRQ;
139
- cc->do_interrupt(cs);
140
+ cc->tcg_ops.do_interrupt(cs);
141
ret = true;
142
}
62
}
143
return ret;
63
- ctx->s_mask = smask_from_zmask(ctx->z_mask);
144
@@ -XXX,XX +XXX,XX @@ static void arm_v7m_class_init(ObjectClass *oc, void *data)
64
return false;
145
CPUClass *cc = CPU_CLASS(oc);
146
147
acc->info = data;
148
-#ifndef CONFIG_USER_ONLY
149
- cc->do_interrupt = arm_v7m_cpu_do_interrupt;
150
-#endif
151
-
152
#ifdef CONFIG_TCG
153
cc->tcg_ops.cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
154
+#ifndef CONFIG_USER_ONLY
155
+ cc->tcg_ops.do_interrupt = arm_v7m_cpu_do_interrupt;
156
+#endif
157
#endif /* CONFIG_TCG */
158
159
cc->gdb_core_xml_file = "arm-m-profile.xml";
160
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
161
index XXXXXXX..XXXXXXX 100644
162
--- a/target/avr/cpu.c
163
+++ b/target/avr/cpu.c
164
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
165
cc->class_by_name = avr_cpu_class_by_name;
166
167
cc->has_work = avr_cpu_has_work;
168
- cc->do_interrupt = avr_cpu_do_interrupt;
169
+ cc->tcg_ops.do_interrupt = avr_cpu_do_interrupt;
170
cc->tcg_ops.cpu_exec_interrupt = avr_cpu_exec_interrupt;
171
cc->dump_state = avr_cpu_dump_state;
172
cc->set_pc = avr_cpu_set_pc;
173
diff --git a/target/avr/helper.c b/target/avr/helper.c
174
index XXXXXXX..XXXXXXX 100644
175
--- a/target/avr/helper.c
176
+++ b/target/avr/helper.c
177
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
178
if (interrupt_request & CPU_INTERRUPT_RESET) {
179
if (cpu_interrupts_enabled(env)) {
180
cs->exception_index = EXCP_RESET;
181
- cc->do_interrupt(cs);
182
+ cc->tcg_ops.do_interrupt(cs);
183
184
cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
185
186
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
187
if (cpu_interrupts_enabled(env) && env->intsrc != 0) {
188
int index = ctz32(env->intsrc);
189
cs->exception_index = EXCP_INT(index);
190
- cc->do_interrupt(cs);
191
+ cc->tcg_ops.do_interrupt(cs);
192
193
env->intsrc &= env->intsrc - 1; /* clear the interrupt */
194
cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
195
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
196
index XXXXXXX..XXXXXXX 100644
197
--- a/target/cris/cpu.c
198
+++ b/target/cris/cpu.c
199
@@ -XXX,XX +XXX,XX @@ static void crisv8_cpu_class_init(ObjectClass *oc, void *data)
200
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
201
202
ccc->vr = 8;
203
- cc->do_interrupt = crisv10_cpu_do_interrupt;
204
+ cc->tcg_ops.do_interrupt = crisv10_cpu_do_interrupt;
205
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
206
cc->tcg_ops.initialize = cris_initialize_crisv10_tcg;
207
}
65
}
208
@@ -XXX,XX +XXX,XX @@ static void crisv9_cpu_class_init(ObjectClass *oc, void *data)
66
209
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
67
@@ -XXX,XX +XXX,XX @@ static bool fold_extract(OptContext *ctx, TCGOp *op)
210
68
return true;
211
ccc->vr = 9;
69
}
212
- cc->do_interrupt = crisv10_cpu_do_interrupt;
70
ctx->z_mask = z_mask;
213
+ cc->tcg_ops.do_interrupt = crisv10_cpu_do_interrupt;
71
- ctx->s_mask = smask_from_zmask(z_mask);
214
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
72
215
cc->tcg_ops.initialize = cris_initialize_crisv10_tcg;
73
return fold_masks(ctx, op);
216
}
74
}
217
@@ -XXX,XX +XXX,XX @@ static void crisv10_cpu_class_init(ObjectClass *oc, void *data)
75
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
218
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
219
220
ccc->vr = 10;
221
- cc->do_interrupt = crisv10_cpu_do_interrupt;
222
+ cc->tcg_ops.do_interrupt = crisv10_cpu_do_interrupt;
223
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
224
cc->tcg_ops.initialize = cris_initialize_crisv10_tcg;
225
}
226
@@ -XXX,XX +XXX,XX @@ static void crisv11_cpu_class_init(ObjectClass *oc, void *data)
227
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
228
229
ccc->vr = 11;
230
- cc->do_interrupt = crisv10_cpu_do_interrupt;
231
+ cc->tcg_ops.do_interrupt = crisv10_cpu_do_interrupt;
232
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
233
cc->tcg_ops.initialize = cris_initialize_crisv10_tcg;
234
}
235
@@ -XXX,XX +XXX,XX @@ static void crisv17_cpu_class_init(ObjectClass *oc, void *data)
236
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
237
238
ccc->vr = 17;
239
- cc->do_interrupt = crisv10_cpu_do_interrupt;
240
+ cc->tcg_ops.do_interrupt = crisv10_cpu_do_interrupt;
241
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
242
cc->tcg_ops.initialize = cris_initialize_crisv10_tcg;
243
}
244
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
245
246
cc->class_by_name = cris_cpu_class_by_name;
247
cc->has_work = cris_cpu_has_work;
248
- cc->do_interrupt = cris_cpu_do_interrupt;
249
+ cc->tcg_ops.do_interrupt = cris_cpu_do_interrupt;
250
cc->tcg_ops.cpu_exec_interrupt = cris_cpu_exec_interrupt;
251
cc->dump_state = cris_cpu_dump_state;
252
cc->set_pc = cris_cpu_set_pc;
253
diff --git a/target/cris/helper.c b/target/cris/helper.c
254
index XXXXXXX..XXXXXXX 100644
255
--- a/target/cris/helper.c
256
+++ b/target/cris/helper.c
257
@@ -XXX,XX +XXX,XX @@ bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
258
&& (env->pregs[PR_CCS] & I_FLAG)
259
&& !env->locked_irq) {
260
cs->exception_index = EXCP_IRQ;
261
- cc->do_interrupt(cs);
262
+ cc->tcg_ops.do_interrupt(cs);
263
ret = true;
264
}
76
}
265
if (interrupt_request & CPU_INTERRUPT_NMI) {
77
266
@@ -XXX,XX +XXX,XX @@ bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
78
ctx->z_mask = z_mask;
267
}
79
- ctx->s_mask = smask_from_zmask(z_mask);
268
if ((env->pregs[PR_CCS] & m_flag_archval)) {
80
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
269
cs->exception_index = EXCP_NMI;
81
return true;
270
- cc->do_interrupt(cs);
82
}
271
+ cc->tcg_ops.do_interrupt(cs);
83
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
272
ret = true;
84
int width = 8 * memop_size(mop);
85
86
if (width < 64) {
87
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
88
- if (!(mop & MO_SIGN)) {
89
+ if (mop & MO_SIGN) {
90
+ ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
91
+ } else {
92
ctx->z_mask = MAKE_64BIT_MASK(0, width);
93
- ctx->s_mask <<= 1;
273
}
94
}
274
}
95
}
275
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
96
276
index XXXXXXX..XXXXXXX 100644
97
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
277
--- a/target/hppa/cpu.c
98
fold_setcond_tst_pow2(ctx, op, false);
278
+++ b/target/hppa/cpu.c
99
279
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
100
ctx->z_mask = 1;
280
101
- ctx->s_mask = smask_from_zmask(1);
281
cc->class_by_name = hppa_cpu_class_by_name;
102
return false;
282
cc->has_work = hppa_cpu_has_work;
103
}
283
- cc->do_interrupt = hppa_cpu_do_interrupt;
104
284
+ cc->tcg_ops.do_interrupt = hppa_cpu_do_interrupt;
105
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
285
cc->tcg_ops.cpu_exec_interrupt = hppa_cpu_exec_interrupt;
106
}
286
cc->dump_state = hppa_cpu_dump_state;
107
287
cc->set_pc = hppa_cpu_set_pc;
108
ctx->z_mask = 1;
288
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
109
- ctx->s_mask = smask_from_zmask(1);
289
index XXXXXXX..XXXXXXX 100644
110
return false;
290
--- a/target/i386/tcg/tcg-cpu.c
111
291
+++ b/target/i386/tcg/tcg-cpu.c
112
do_setcond_const:
292
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_synchronize_from_tb(CPUState *cs,
113
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
293
114
break;
294
void tcg_cpu_common_class_init(CPUClass *cc)
115
CASE_OP_32_64(ld8u):
295
{
116
ctx->z_mask = MAKE_64BIT_MASK(0, 8);
296
- cc->do_interrupt = x86_cpu_do_interrupt;
117
- ctx->s_mask = MAKE_64BIT_MASK(9, 55);
297
+ cc->tcg_ops.do_interrupt = x86_cpu_do_interrupt;
118
break;
298
cc->tcg_ops.cpu_exec_interrupt = x86_cpu_exec_interrupt;
119
CASE_OP_32_64(ld16s):
299
cc->tcg_ops.synchronize_from_tb = x86_cpu_synchronize_from_tb;
120
ctx->s_mask = MAKE_64BIT_MASK(16, 48);
300
cc->tcg_ops.cpu_exec_enter = x86_cpu_exec_enter;
121
break;
301
diff --git a/target/lm32/cpu.c b/target/lm32/cpu.c
122
CASE_OP_32_64(ld16u):
302
index XXXXXXX..XXXXXXX 100644
123
ctx->z_mask = MAKE_64BIT_MASK(0, 16);
303
--- a/target/lm32/cpu.c
124
- ctx->s_mask = MAKE_64BIT_MASK(17, 47);
304
+++ b/target/lm32/cpu.c
125
break;
305
@@ -XXX,XX +XXX,XX @@ static void lm32_cpu_class_init(ObjectClass *oc, void *data)
126
case INDEX_op_ld32s_i64:
306
127
ctx->s_mask = MAKE_64BIT_MASK(32, 32);
307
cc->class_by_name = lm32_cpu_class_by_name;
128
break;
308
cc->has_work = lm32_cpu_has_work;
129
case INDEX_op_ld32u_i64:
309
- cc->do_interrupt = lm32_cpu_do_interrupt;
130
ctx->z_mask = MAKE_64BIT_MASK(0, 32);
310
+ cc->tcg_ops.do_interrupt = lm32_cpu_do_interrupt;
131
- ctx->s_mask = MAKE_64BIT_MASK(33, 31);
311
cc->tcg_ops.cpu_exec_interrupt = lm32_cpu_exec_interrupt;
132
break;
312
cc->dump_state = lm32_cpu_dump_state;
133
default:
313
cc->set_pc = lm32_cpu_set_pc;
134
g_assert_not_reached();
314
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)

cc->class_by_name = m68k_cpu_class_by_name;
cc->has_work = m68k_cpu_has_work;
- cc->do_interrupt = m68k_cpu_do_interrupt;
+ cc->tcg_ops.do_interrupt = m68k_cpu_do_interrupt;
cc->tcg_ops.cpu_exec_interrupt = m68k_cpu_exec_interrupt;
cc->dump_state = m68k_cpu_dump_state;
cc->set_pc = m68k_cpu_set_pc;
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = mb_cpu_class_by_name;
cc->has_work = mb_cpu_has_work;
- cc->do_interrupt = mb_cpu_do_interrupt;
+ cc->tcg_ops.do_interrupt = mb_cpu_do_interrupt;
cc->do_unaligned_access = mb_cpu_do_unaligned_access;
cc->tcg_ops.cpu_exec_interrupt = mb_cpu_exec_interrupt;
cc->dump_state = mb_cpu_dump_state;
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)

cc->class_by_name = mips_cpu_class_by_name;
cc->has_work = mips_cpu_has_work;
- cc->do_interrupt = mips_cpu_do_interrupt;
cc->dump_state = mips_cpu_dump_state;
cc->set_pc = mips_cpu_set_pc;
cc->gdb_read_register = mips_cpu_gdb_read_register;
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->disas_set_info = mips_cpu_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_ops.initialize = mips_tcg_init;
+ cc->tcg_ops.do_interrupt = mips_cpu_do_interrupt;
cc->tcg_ops.cpu_exec_interrupt = mips_cpu_exec_interrupt;
cc->tcg_ops.synchronize_from_tb = mips_cpu_synchronize_from_tb;
cc->tcg_ops.tlb_fill = mips_cpu_tlb_fill;
-#endif
+#endif /* CONFIG_TCG */

cc->gdb_num_core_regs = 73;
cc->gdb_stop_before_watchpoint = true;
diff --git a/target/moxie/cpu.c b/target/moxie/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/moxie/cpu.c
+++ b/target/moxie/cpu.c
@@ -XXX,XX +XXX,XX @@ static void moxie_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = moxie_cpu_class_by_name;

cc->has_work = moxie_cpu_has_work;
- cc->do_interrupt = moxie_cpu_do_interrupt;
+ cc->tcg_ops.do_interrupt = moxie_cpu_do_interrupt;
cc->dump_state = moxie_cpu_dump_state;
cc->set_pc = moxie_cpu_set_pc;
cc->tcg_ops.tlb_fill = moxie_cpu_tlb_fill;
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -XXX,XX +XXX,XX @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = nios2_cpu_class_by_name;
cc->has_work = nios2_cpu_has_work;
- cc->do_interrupt = nios2_cpu_do_interrupt;
+ cc->tcg_ops.do_interrupt = nios2_cpu_do_interrupt;
cc->tcg_ops.cpu_exec_interrupt = nios2_cpu_exec_interrupt;
cc->dump_state = nios2_cpu_dump_state;
cc->set_pc = nios2_cpu_set_pc;
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = openrisc_cpu_class_by_name;
cc->has_work = openrisc_cpu_has_work;
- cc->do_interrupt = openrisc_cpu_do_interrupt;
+ cc->tcg_ops.do_interrupt = openrisc_cpu_do_interrupt;
cc->tcg_ops.cpu_exec_interrupt = openrisc_cpu_exec_interrupt;
cc->dump_state = openrisc_cpu_dump_state;
cc->set_pc = openrisc_cpu_set_pc;
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)

cc->class_by_name = riscv_cpu_class_by_name;
cc->has_work = riscv_cpu_has_work;
- cc->do_interrupt = riscv_cpu_do_interrupt;
+ cc->tcg_ops.do_interrupt = riscv_cpu_do_interrupt;
cc->tcg_ops.cpu_exec_interrupt = riscv_cpu_exec_interrupt;
cc->dump_state = riscv_cpu_dump_state;
cc->set_pc = riscv_cpu_set_pc;
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)

cc->class_by_name = rx_cpu_class_by_name;
cc->has_work = rx_cpu_has_work;
- cc->do_interrupt = rx_cpu_do_interrupt;
+ cc->tcg_ops.do_interrupt = rx_cpu_do_interrupt;
cc->tcg_ops.cpu_exec_interrupt = rx_cpu_exec_interrupt;
cc->dump_state = rx_cpu_dump_state;
cc->set_pc = rx_cpu_set_pc;
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = s390_cpu_class_by_name,
cc->has_work = s390_cpu_has_work;
#ifdef CONFIG_TCG
- cc->do_interrupt = s390_cpu_do_interrupt;
+ cc->tcg_ops.do_interrupt = s390_cpu_do_interrupt;
#endif
cc->dump_state = s390_cpu_dump_state;
cc->set_pc = s390_cpu_set_pc;
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = superh_cpu_class_by_name;
cc->has_work = superh_cpu_has_work;
- cc->do_interrupt = superh_cpu_do_interrupt;
+ cc->tcg_ops.do_interrupt = superh_cpu_do_interrupt;
cc->tcg_ops.cpu_exec_interrupt = superh_cpu_exec_interrupt;
cc->dump_state = superh_cpu_dump_state;
cc->set_pc = superh_cpu_set_pc;
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = sparc_cpu_class_by_name;
cc->parse_features = sparc_cpu_parse_features;
cc->has_work = sparc_cpu_has_work;
- cc->do_interrupt = sparc_cpu_do_interrupt;
+ cc->tcg_ops.do_interrupt = sparc_cpu_do_interrupt;
cc->tcg_ops.cpu_exec_interrupt = sparc_cpu_exec_interrupt;
cc->dump_state = sparc_cpu_dump_state;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
diff --git a/target/tilegx/cpu.c b/target/tilegx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tilegx/cpu.c
+++ b/target/tilegx/cpu.c
@@ -XXX,XX +XXX,XX @@ static void tilegx_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = tilegx_cpu_class_by_name;
cc->has_work = tilegx_cpu_has_work;
- cc->do_interrupt = tilegx_cpu_do_interrupt;
+ cc->tcg_ops.do_interrupt = tilegx_cpu_do_interrupt;
cc->tcg_ops.cpu_exec_interrupt = tilegx_cpu_exec_interrupt;
cc->dump_state = tilegx_cpu_dump_state;
cc->set_pc = tilegx_cpu_set_pc;
diff --git a/target/unicore32/cpu.c b/target/unicore32/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/unicore32/cpu.c
+++ b/target/unicore32/cpu.c
@@ -XXX,XX +XXX,XX @@ static void uc32_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = uc32_cpu_class_by_name;
cc->has_work = uc32_cpu_has_work;
- cc->do_interrupt = uc32_cpu_do_interrupt;
+ cc->tcg_ops.do_interrupt = uc32_cpu_do_interrupt;
cc->tcg_ops.cpu_exec_interrupt = uc32_cpu_exec_interrupt;
cc->dump_state = uc32_cpu_dump_state;
cc->set_pc = uc32_cpu_set_pc;
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = xtensa_cpu_class_by_name;
cc->has_work = xtensa_cpu_has_work;
- cc->do_interrupt = xtensa_cpu_do_interrupt;
+ cc->tcg_ops.do_interrupt = xtensa_cpu_do_interrupt;
cc->tcg_ops.cpu_exec_interrupt = xtensa_cpu_exec_interrupt;
cc->dump_state = xtensa_cpu_dump_state;
cc->set_pc = xtensa_cpu_set_pc;
diff --git a/target/ppc/translate_init.c.inc b/target/ppc/translate_init.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate_init.c.inc
+++ b/target/ppc/translate_init.c.inc
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = ppc_cpu_class_by_name;
cc->has_work = ppc_cpu_has_work;
- cc->do_interrupt = ppc_cpu_do_interrupt;
cc->dump_state = ppc_cpu_dump_state;
cc->dump_statistics = ppc_cpu_dump_statistics;
cc->set_pc = ppc_cpu_set_pc;
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
#ifdef CONFIG_TCG
cc->tcg_ops.initialize = ppc_translate_init;
cc->tcg_ops.cpu_exec_interrupt = ppc_cpu_exec_interrupt;
+ cc->tcg_ops.do_interrupt = ppc_cpu_do_interrupt;
cc->tcg_ops.tlb_fill = ppc_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->tcg_ops.cpu_exec_enter = ppc_cpu_exec_enter;
--
2.25.1

@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
int width = 8 * memop_size(mop);

if (width < 64) {
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
- if (!(mop & MO_SIGN)) {
+ if (mop & MO_SIGN) {
+ ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
+ } else {
ctx->z_mask = MAKE_64BIT_MASK(0, width);
- ctx->s_mask <<= 1;
}
}
}
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
fold_setcond_tst_pow2(ctx, op, false);

ctx->z_mask = 1;
- ctx->s_mask = smask_from_zmask(1);
return false;
}
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)

ctx->z_mask = 1;
- ctx->s_mask = smask_from_zmask(1);
return false;

do_setcond_const:
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
break;
CASE_OP_32_64(ld8u):
ctx->z_mask = MAKE_64BIT_MASK(0, 8);
- ctx->s_mask = MAKE_64BIT_MASK(9, 55);
break;
CASE_OP_32_64(ld16s):
ctx->s_mask = MAKE_64BIT_MASK(16, 48);
break;
CASE_OP_32_64(ld16u):
ctx->z_mask = MAKE_64BIT_MASK(0, 16);
- ctx->s_mask = MAKE_64BIT_MASK(17, 47);
break;
case INDEX_op_ld32s_i64:
ctx->s_mask = MAKE_64BIT_MASK(32, 32);
break;
case INDEX_op_ld32u_i64:
ctx->z_mask = MAKE_64BIT_MASK(0, 32);
- ctx->s_mask = MAKE_64BIT_MASK(33, 31);
break;
default:
g_assert_not_reached();
--
2.43.0
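Both patches above track the same pair of invariants, so it is worth stating them once: z_mask has a 0 wherever a value bit is known to be zero, and s_mask (in the representation used from here on) has a 1 wherever a value bit is known to match the most significant bit. A standalone C illustration of what the fold_tcg_ld cases assert (editorial sketch, not QEMU code; MAKE_64BIT_MASK semantics are taken from the diffs above):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Result of a sign-extending 8-bit load (ld8s). */
        int64_t v = (int8_t)0xf0;           /* -16, sign-extended */
        uint64_t s_mask = ~0ull << 7;       /* bits 7..63 all match the msb */

        /* Result of a zero-extending 8-bit load (ld8u). */
        uint64_t u = (uint8_t)0xf0;
        uint64_t z_mask = 0xff;             /* MAKE_64BIT_MASK(0, 8) */

        /* Every bit covered by s_mask equals the sign bit of v. */
        assert((((uint64_t)v ^ (uint64_t)(v >> 63)) & s_mask) == 0);
        /* No bit outside z_mask can be set in u. */
        assert((u & ~z_mask) == 0);
        return 0;
    }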
Restrict all operands to registers. All constants will be forced
into registers by the middle-end. Removing the difference in how
immediate integers were encoded will allow more code to be shared
between 32-bit and 64-bit operations.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tci/tcg-target-con-set.h | 6 +-
tcg/tci/tcg-target.h | 3 -
tcg/tci.c | 189 +++++++++++++----------------------
tcg/tci/tcg-target.c.inc | 85 ++++------------
4 files changed, 89 insertions(+), 194 deletions(-)

diff --git a/tcg/tci/tcg-target-con-set.h b/tcg/tci/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target-con-set.h
+++ b/tcg/tci/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
 * tcg-target-con-str.h; the constraint combination is inclusive or.
 */
C_O0_I2(r, r)
-C_O0_I2(r, ri)
C_O0_I3(r, r, r)
-C_O0_I4(r, r, ri, ri)
C_O0_I4(r, r, r, r)
C_O1_I1(r, r)
C_O1_I2(r, 0, r)
-C_O1_I2(r, ri, ri)
C_O1_I2(r, r, r)
-C_O1_I2(r, r, ri)
-C_O1_I4(r, r, r, ri, ri)
+C_O1_I4(r, r, r, r, r)
C_O2_I1(r, r, r)
C_O2_I2(r, r, r, r)
C_O2_I4(r, r, r, r, r, r)
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -XXX,XX +XXX,XX @@ typedef enum {

TCG_AREG0 = TCG_REG_R14,
TCG_REG_CALL_STACK = TCG_REG_R15,
-
- /* Special value UINT8_MAX is used by TCI to encode constant values. */
- TCG_CONST = UINT8_MAX
} TCGReg;

/* Used for function call generation. */
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ tci_read_ulong(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
return taddr;
}

-/* Read indexed register or constant (native size) from bytecode. */
-static tcg_target_ulong
-tci_read_ri(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
-{
- tcg_target_ulong value;
- TCGReg r = **tb_ptr;
- *tb_ptr += 1;
- if (r == TCG_CONST) {
- value = tci_read_i(tb_ptr);
- } else {
- value = tci_read_reg(regs, r);
- }
- return value;
-}
-
-/* Read indexed register or constant (32 bit) from bytecode. */
-static uint32_t tci_read_ri32(const tcg_target_ulong *regs,
- const uint8_t **tb_ptr)
-{
- uint32_t value;
- TCGReg r = **tb_ptr;
- *tb_ptr += 1;
- if (r == TCG_CONST) {
- value = tci_read_i32(tb_ptr);
- } else {
- value = tci_read_reg32(regs, r);
- }
- return value;
-}
-
-#if TCG_TARGET_REG_BITS == 32
-/* Read two indexed registers or constants (2 * 32 bit) from bytecode. */
-static uint64_t tci_read_ri64(const tcg_target_ulong *regs,
- const uint8_t **tb_ptr)
-{
- uint32_t low = tci_read_ri32(regs, tb_ptr);
- return tci_uint64(tci_read_ri32(regs, tb_ptr), low);
-}
-#elif TCG_TARGET_REG_BITS == 64
-/* Read indexed register or constant (64 bit) from bytecode. */
-static uint64_t tci_read_ri64(const tcg_target_ulong *regs,
- const uint8_t **tb_ptr)
-{
- uint64_t value;
- TCGReg r = **tb_ptr;
- *tb_ptr += 1;
- if (r == TCG_CONST) {
- value = tci_read_i64(tb_ptr);
- } else {
- value = tci_read_reg64(regs, r);
- }
- return value;
-}
-#endif
-
static tcg_target_ulong tci_read_label(const uint8_t **tb_ptr)
{
tcg_target_ulong label = tci_read_i(tb_ptr);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,

switch (opc) {
case INDEX_op_call:
- t0 = tci_read_ri(regs, &tb_ptr);
+ t0 = tci_read_i(&tb_ptr);
tci_tb_ptr = (uintptr_t)tb_ptr;
#if TCG_TARGET_REG_BITS == 32
tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
case INDEX_op_setcond_i32:
t0 = *tb_ptr++;
t1 = tci_read_r32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
condition = *tb_ptr++;
tci_write_reg(regs, t0, tci_compare32(t1, t2, condition));
break;
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
case INDEX_op_setcond2_i32:
t0 = *tb_ptr++;
tmp64 = tci_read_r64(regs, &tb_ptr);
- v64 = tci_read_ri64(regs, &tb_ptr);
+ v64 = tci_read_r64(regs, &tb_ptr);
condition = *tb_ptr++;
tci_write_reg(regs, t0, tci_compare64(tmp64, v64, condition));
break;
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
case INDEX_op_setcond_i64:
t0 = *tb_ptr++;
t1 = tci_read_r64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
condition = *tb_ptr++;
tci_write_reg(regs, t0, tci_compare64(t1, t2, condition));
break;
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,

case INDEX_op_add_i32:
t0 = *tb_ptr++;
- t1 = tci_read_ri32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 + t2);
break;
case INDEX_op_sub_i32:
t0 = *tb_ptr++;
- t1 = tci_read_ri32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 - t2);
break;
case INDEX_op_mul_i32:
t0 = *tb_ptr++;
- t1 = tci_read_ri32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 * t2);
break;
case INDEX_op_div_i32:
t0 = *tb_ptr++;
- t1 = tci_read_ri32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
tci_write_reg(regs, t0, (int32_t)t1 / (int32_t)t2);
break;
case INDEX_op_divu_i32:
t0 = *tb_ptr++;
- t1 = tci_read_ri32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 / t2);
break;
case INDEX_op_rem_i32:
t0 = *tb_ptr++;
- t1 = tci_read_ri32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
tci_write_reg(regs, t0, (int32_t)t1 % (int32_t)t2);
break;
case INDEX_op_remu_i32:
t0 = *tb_ptr++;
- t1 = tci_read_ri32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 % t2);
break;
case INDEX_op_and_i32:
t0 = *tb_ptr++;
- t1 = tci_read_ri32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 & t2);
break;
case INDEX_op_or_i32:
t0 = *tb_ptr++;
- t1 = tci_read_ri32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 | t2);
break;
case INDEX_op_xor_i32:
t0 = *tb_ptr++;
- t1 = tci_read_ri32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 ^ t2);
break;

@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,

case INDEX_op_shl_i32:
t0 = *tb_ptr++;
- t1 = tci_read_ri32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 << (t2 & 31));
break;
case INDEX_op_shr_i32:
t0 = *tb_ptr++;
- t1 = tci_read_ri32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 >> (t2 & 31));
break;
case INDEX_op_sar_i32:
t0 = *tb_ptr++;
- t1 = tci_read_ri32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
tci_write_reg(regs, t0, ((int32_t)t1 >> (t2 & 31)));
break;
#if TCG_TARGET_HAS_rot_i32
case INDEX_op_rotl_i32:
t0 = *tb_ptr++;
- t1 = tci_read_ri32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
tci_write_reg(regs, t0, rol32(t1, t2 & 31));
break;
case INDEX_op_rotr_i32:
t0 = *tb_ptr++;
- t1 = tci_read_ri32(regs, &tb_ptr);
- t2 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
+ t2 = tci_read_r32(regs, &tb_ptr);
tci_write_reg(regs, t0, ror32(t1, t2 & 31));
break;
#endif
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
#endif
case INDEX_op_brcond_i32:
t0 = tci_read_r32(regs, &tb_ptr);
- t1 = tci_read_ri32(regs, &tb_ptr);
+ t1 = tci_read_r32(regs, &tb_ptr);
condition = *tb_ptr++;
label = tci_read_label(&tb_ptr);
if (tci_compare32(t0, t1, condition)) {
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
break;
case INDEX_op_brcond2_i32:
tmp64 = tci_read_r64(regs, &tb_ptr);
- v64 = tci_read_ri64(regs, &tb_ptr);
+ v64 = tci_read_r64(regs, &tb_ptr);
condition = *tb_ptr++;
label = tci_read_label(&tb_ptr);
if (tci_compare64(tmp64, v64, condition)) {
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,

case INDEX_op_add_i64:
t0 = *tb_ptr++;
- t1 = tci_read_ri64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 + t2);
break;
case INDEX_op_sub_i64:
t0 = *tb_ptr++;
- t1 = tci_read_ri64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 - t2);
break;
case INDEX_op_mul_i64:
t0 = *tb_ptr++;
- t1 = tci_read_ri64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 * t2);
break;
case INDEX_op_div_i64:
t0 = *tb_ptr++;
- t1 = tci_read_ri64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
tci_write_reg(regs, t0, (int64_t)t1 / (int64_t)t2);
break;
case INDEX_op_divu_i64:
t0 = *tb_ptr++;
- t1 = tci_read_ri64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
tci_write_reg(regs, t0, (uint64_t)t1 / (uint64_t)t2);
break;
case INDEX_op_rem_i64:
t0 = *tb_ptr++;
- t1 = tci_read_ri64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
tci_write_reg(regs, t0, (int64_t)t1 % (int64_t)t2);
break;
case INDEX_op_remu_i64:
t0 = *tb_ptr++;
- t1 = tci_read_ri64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
tci_write_reg(regs, t0, (uint64_t)t1 % (uint64_t)t2);
break;
case INDEX_op_and_i64:
t0 = *tb_ptr++;
- t1 = tci_read_ri64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 & t2);
break;
case INDEX_op_or_i64:
t0 = *tb_ptr++;
- t1 = tci_read_ri64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 | t2);
break;
case INDEX_op_xor_i64:
t0 = *tb_ptr++;
- t1 = tci_read_ri64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 ^ t2);
break;

@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,

case INDEX_op_shl_i64:
t0 = *tb_ptr++;
- t1 = tci_read_ri64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 << (t2 & 63));
break;
case INDEX_op_shr_i64:
t0 = *tb_ptr++;
- t1 = tci_read_ri64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
tci_write_reg(regs, t0, t1 >> (t2 & 63));
break;
case INDEX_op_sar_i64:
t0 = *tb_ptr++;
- t1 = tci_read_ri64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
tci_write_reg(regs, t0, ((int64_t)t1 >> (t2 & 63)));
break;
#if TCG_TARGET_HAS_rot_i64
case INDEX_op_rotl_i64:
t0 = *tb_ptr++;
- t1 = tci_read_ri64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
tci_write_reg(regs, t0, rol64(t1, t2 & 63));
break;
case INDEX_op_rotr_i64:
t0 = *tb_ptr++;
- t1 = tci_read_ri64(regs, &tb_ptr);
- t2 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
+ t2 = tci_read_r64(regs, &tb_ptr);
tci_write_reg(regs, t0, ror64(t1, t2 & 63));
break;
#endif
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
#endif
case INDEX_op_brcond_i64:
t0 = tci_read_r64(regs, &tb_ptr);
- t1 = tci_read_ri64(regs, &tb_ptr);
+ t1 = tci_read_r64(regs, &tb_ptr);
condition = *tb_ptr++;
label = tci_read_label(&tb_ptr);
if (tci_compare64(t0, t1, condition)) {
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_rem_i64:
case INDEX_op_remu_i32:
case INDEX_op_remu_i64:
- return C_O1_I2(r, r, r);
-
case INDEX_op_add_i32:
case INDEX_op_add_i64:
case INDEX_op_sub_i32:
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_rotl_i64:
case INDEX_op_rotr_i32:
case INDEX_op_rotr_i64:
- /* TODO: Does R, RI, RI result in faster code than R, R, RI? */
- return C_O1_I2(r, ri, ri);
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ return C_O1_I2(r, r, r);

case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)

case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
- return C_O0_I2(r, ri);
-
- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- return C_O1_I2(r, r, ri);
+ return C_O0_I2(r, r);

#if TCG_TARGET_REG_BITS == 32
/* TODO: Support R, R, R, R, RI, RI? Will it be faster? */
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_sub2_i32:
return C_O2_I4(r, r, r, r, r, r);
case INDEX_op_brcond2_i32:
- return C_O0_I4(r, r, ri, ri);
+ return C_O0_I4(r, r, r, r);
case INDEX_op_mulu2_i32:
return C_O2_I2(r, r, r, r);
case INDEX_op_setcond2_i32:
- return C_O1_I4(r, r, r, ri, ri);
+ return C_O1_I4(r, r, r, r, r);
#endif

case INDEX_op_qemu_ld_i32:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_r(TCGContext *s, TCGArg t0)
tcg_out8(s, t0);
}

-/* Write register or constant (native size). */
-static void tcg_out_ri(TCGContext *s, int const_arg, TCGArg arg)
-{
- if (const_arg) {
- tcg_debug_assert(const_arg == 1);
- tcg_out8(s, TCG_CONST);
- tcg_out_i(s, arg);
- } else {
- tcg_out_r(s, arg);
- }
-}
-
-/* Write register or constant (32 bit). */
-static void tcg_out_ri32(TCGContext *s, int const_arg, TCGArg arg)
-{
- if (const_arg) {
- tcg_debug_assert(const_arg == 1);
- tcg_out8(s, TCG_CONST);
- tcg_out32(s, arg);
- } else {
- tcg_out_r(s, arg);
- }
-}
-
-#if TCG_TARGET_REG_BITS == 64
-/* Write register or constant (64 bit). */
-static void tcg_out_ri64(TCGContext *s, int const_arg, TCGArg arg)
-{
- if (const_arg) {
- tcg_debug_assert(const_arg == 1);
- tcg_out8(s, TCG_CONST);
- tcg_out64(s, arg);
- } else {
- tcg_out_r(s, arg);
- }
-}
-#endif
-
/* Write label. */
static void tci_out_label(TCGContext *s, TCGLabel *label)
{
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
{
uint8_t *old_code_ptr = s->code_ptr;
tcg_out_op_t(s, INDEX_op_call);
- tcg_out_ri(s, 1, (uintptr_t)arg);
+ tcg_out_i(s, (uintptr_t)arg);
old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
case INDEX_op_setcond_i32:
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
- tcg_out_ri32(s, const_args[2], args[2]);
+ tcg_out_r(s, args[2]);
tcg_out8(s, args[3]); /* condition */
break;
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_setcond2_i32:
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
tcg_out_r(s, args[2]);
- tcg_out_ri32(s, const_args[3], args[3]);
- tcg_out_ri32(s, const_args[4], args[4]);
+ tcg_out_r(s, args[3]);
+ tcg_out_r(s, args[4]);
tcg_out8(s, args[5]); /* condition */
break;
#elif TCG_TARGET_REG_BITS == 64
case INDEX_op_setcond_i64:
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
- tcg_out_ri64(s, const_args[2], args[2]);
+ tcg_out_r(s, args[2]);
tcg_out8(s, args[3]); /* condition */
break;
#endif
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
case INDEX_op_rotl_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */
case INDEX_op_rotr_i32: /* Optional (TCG_TARGET_HAS_rot_i32). */
tcg_out_r(s, args[0]);
- tcg_out_ri32(s, const_args[1], args[1]);
- tcg_out_ri32(s, const_args[2], args[2]);
+ tcg_out_r(s, args[1]);
+ tcg_out_r(s, args[2]);
break;
case INDEX_op_deposit_i32: /* Optional (TCG_TARGET_HAS_deposit_i32). */
tcg_out_r(s, args[0]);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
case INDEX_op_rem_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
case INDEX_op_remu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
tcg_out_r(s, args[0]);
- tcg_out_ri64(s, const_args[1], args[1]);
- tcg_out_ri64(s, const_args[2], args[2]);
+ tcg_out_r(s, args[1]);
+ tcg_out_r(s, args[2]);
break;
case INDEX_op_deposit_i64: /* Optional (TCG_TARGET_HAS_deposit_i64). */
tcg_out_r(s, args[0]);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
break;
case INDEX_op_brcond_i64:
tcg_out_r(s, args[0]);
- tcg_out_ri64(s, const_args[1], args[1]);
+ tcg_out_r(s, args[1]);
tcg_out8(s, args[2]); /* condition */
tci_out_label(s, arg_label(args[3]));
break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
case INDEX_op_rem_i32: /* Optional (TCG_TARGET_HAS_div_i32). */
case INDEX_op_remu_i32: /* Optional (TCG_TARGET_HAS_div_i32). */
tcg_out_r(s, args[0]);
- tcg_out_ri32(s, const_args[1], args[1]);
- tcg_out_ri32(s, const_args[2], args[2]);
+ tcg_out_r(s, args[1]);
+ tcg_out_r(s, args[2]);
break;
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_add2_i32:
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
case INDEX_op_brcond2_i32:
tcg_out_r(s, args[0]);
tcg_out_r(s, args[1]);
- tcg_out_ri32(s, const_args[2], args[2]);
- tcg_out_ri32(s, const_args[3], args[3]);
+ tcg_out_r(s, args[2]);
+ tcg_out_r(s, args[3]);
tcg_out8(s, args[4]); /* condition */
tci_out_label(s, arg_label(args[5]));
break;
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
#endif
case INDEX_op_brcond_i32:
tcg_out_r(s, args[0]);
- tcg_out_ri32(s, const_args[1], args[1]);
+ tcg_out_r(s, args[1]);
tcg_out8(s, args[2]); /* condition */
tci_out_label(s, arg_label(args[3]));
break;
--
2.25.1

Change the representation from sign bit repetitions to all bits equal
to the sign bit, including the sign bit itself.

The previous format has a problem in that it is difficult to recreate
a valid sign mask after a shift operation: the "repetitions" part of
the previous format meant that applying the same shift as for the value
led to an off-by-one value.

The new format, including the sign bit itself, means that the sign mask
can be manipulated in exactly the same way as the value, and
canonicalization is easier.

Canonicalize the s_mask in fold_masks_zs, rather than requiring callers
to do so. Treat 0 as a non-canonical but typeless input for no sign
information, which will be reset as appropriate for the data type.
We can easily fold in the data from z_mask while canonicalizing.

Temporarily disable optimizations using s_mask while each operation is
converted to use fold_masks_zs and to the new form.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 64 ++++++++++++--------------------------------------
1 file changed, 15 insertions(+), 49 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct TempOptInfo {
QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
uint64_t val;
uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */
- uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
} TempOptInfo;

typedef struct OptContext {
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {

/* In flight values from optimization. */
uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */
- uint64_t s_mask; /* mask of clrsb(value) bits */
+ uint64_t s_mask; /* mask bit is 1 if value bit matches msb */
TCGType type;
} OptContext;

-/* Calculate the smask for a specific value. */
-static uint64_t smask_from_value(uint64_t value)
-{
- int rep = clrsb64(value);
- return ~(~0ull >> rep);
-}
-
-/*
- * Calculate the smask for a given set of known-zeros.
- * If there are lots of zeros on the left, we can consider the remainder
- * an unsigned field, and thus the corresponding signed field is one bit
- * larger.
- */
-static uint64_t smask_from_zmask(uint64_t zmask)
-{
- /*
- * Only the 0 bits are significant for zmask, thus the msb itself
- * must be zero, else we have no sign information.
- */
- int rep = clz64(zmask);
- if (rep == 0) {
- return 0;
- }
- rep -= 1;
- return ~(~0ull >> rep);
-}
-
-/*
- * Recreate a properly left-aligned smask after manipulation.
- * Some bit-shuffling, particularly shifts and rotates, may
- * retain sign bits on the left, but may scatter disconnected
- * sign bits on the right. Retain only what remains to the left.
- */
-static uint64_t smask_from_smask(int64_t smask)
-{
- /* Only the 1 bits are significant for smask */
- return smask_from_zmask(~smask);
-}
-
static inline TempOptInfo *ts_info(TCGTemp *ts)
{
return ts->state_ptr;
@@ -XXX,XX +XXX,XX @@ static void init_ts_info(OptContext *ctx, TCGTemp *ts)
ti->is_const = true;
ti->val = ts->val;
ti->z_mask = ts->val;
- ti->s_mask = smask_from_value(ts->val);
+ ti->s_mask = INT64_MIN >> clrsb64(ts->val);
} else {
ti->is_const = false;
ti->z_mask = -1;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
*/
if (i == 0) {
ts_info(ts)->z_mask = ctx->z_mask;
- ts_info(ts)->s_mask = ctx->s_mask;
}
}
}
@@ -XXX,XX +XXX,XX @@ static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
 * The passed s_mask may be augmented by z_mask.
 */
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
- uint64_t z_mask, uint64_t s_mask)
+ uint64_t z_mask, int64_t s_mask)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
TCGTemp *ts;
TempOptInfo *ti;
+ int rep;

/* Only single-output opcodes are supported here. */
tcg_debug_assert(def->nb_oargs == 1);
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
*/
if (ctx->type == TCG_TYPE_I32) {
z_mask = (int32_t)z_mask;
- s_mask |= MAKE_64BIT_MASK(32, 32);
+ s_mask |= INT32_MIN;
}

if (z_mask == 0) {
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,

ti = ts_info(ts);
ti->z_mask = z_mask;
- ti->s_mask = s_mask | smask_from_zmask(z_mask);
+
+ /* Canonicalize s_mask and incorporate data from z_mask. */
+ rep = clz64(~s_mask);
+ rep = MAX(rep, clz64(z_mask));
+ rep = MAX(rep - 1, 0);
+ ti->s_mask = INT64_MIN >> rep;
+
return true;
}

@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)

ctx->z_mask = z_mask;
ctx->s_mask = s_mask;
- if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+ if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
return true;
}

@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
s_mask |= MAKE_64BIT_MASK(len, 64 - len);
ctx->s_mask = s_mask;

- if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+ if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
return true;
}

@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);

s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
- ctx->s_mask = smask_from_smask(s_mask);

return fold_masks(ctx, op);
}
--
2.43.0
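A worked example of the off-by-one problem described in the message above, using the old and new definitions as they appear in the diff (editorial sketch, not part of the patch; the clrsb64 stand-in mirrors the semantics of QEMU's clrsb64, and the example relies on the usual arithmetic behavior of signed right shift):

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in for clrsb64: count of bits following the sign bit that
     * are identical to it. */
    static int clrsb64(uint64_t v)
    {
        int n = 0;
        while (n < 63 && ((v >> 62) & 1) == ((v >> 63) & 1)) {
            v <<= 1;
            n++;
        }
        return n;
    }

    int main(void)
    {
        int64_t v = -2;                       /* ...11110: 62 redundant sign bits */
        int rep = clrsb64((uint64_t)v);

        uint64_t old_mask = ~(~0ull >> rep);  /* old form: sign bit excluded */
        int64_t new_mask = INT64_MIN >> rep;  /* new form: sign bit included */

        int64_t v2 = v >> 1;                  /* shift the value: -1 */

        /* New form: shifting the mask like the value stays exact. */
        assert((new_mask >> 1) == (INT64_MIN >> clrsb64((uint64_t)v2)));
        /* Old form: the same shift loses the top bit, off by one. */
        assert((old_mask >> 1) != ~(~0ull >> clrsb64((uint64_t)v2)));
        return 0;
    }

Because the new mask includes the sign bit, an arithmetic shift of the mask tracks an arithmetic shift of the value exactly, which is what makes the canonicalization in fold_masks_zs (rep derived from clz64(~s_mask) and clz64(z_mask)) safe.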
From: Eduardo Habkost <ehabkost@redhat.com>

The TCG-specific CPU methods will be moved to a separate struct,
to make it easier to move accel-specific code outside generic CPU
code in the future. Start by moving tcg_initialize().

The new CPUClass.tcg_opts field may eventually become a pointer,
but keep it an embedded struct for now, to make code conversion
easier.

Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
[claudio: move TCGCpuOperations inside include/hw/core/cpu.h]
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20210204163931.7358-2-cfontana@suse.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/hw/core/cpu.h | 16 +++++++++++++++-
cpu.c | 6 +++++-
target/alpha/cpu.c | 2 +-
target/arm/cpu.c | 2 +-
target/avr/cpu.c | 2 +-
target/cris/cpu.c | 12 ++++++------
target/hppa/cpu.c | 2 +-
target/i386/tcg/tcg-cpu.c | 2 +-
target/lm32/cpu.c | 2 +-
target/m68k/cpu.c | 2 +-
target/microblaze/cpu.c | 2 +-
target/mips/cpu.c | 2 +-
target/moxie/cpu.c | 2 +-
target/nios2/cpu.c | 2 +-
target/openrisc/cpu.c | 2 +-
target/riscv/cpu.c | 2 +-
target/rx/cpu.c | 2 +-
target/s390x/cpu.c | 2 +-
target/sh4/cpu.c | 2 +-
target/sparc/cpu.c | 2 +-
target/tilegx/cpu.c | 2 +-
target/tricore/cpu.c | 2 +-
target/unicore32/cpu.c | 2 +-
target/xtensa/cpu.c | 2 +-
target/ppc/translate_init.c.inc | 2 +-
25 files changed, 48 insertions(+), 30 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef struct CPUWatchpoint CPUWatchpoint;

struct TranslationBlock;

+/**
+ * struct TcgCpuOperations: TCG operations specific to a CPU class
+ */
+typedef struct TcgCpuOperations {
+ /**
+ * @initialize: Initialize TCG state
+ *
+ * Called when the first CPU is realized.
+ */
+ void (*initialize)(void);
+
+} TcgCpuOperations;
+
/**
 * CPUClass:
 * @class_by_name: Callback to map -cpu command line model name to an
@@ -XXX,XX +XXX,XX @@ struct CPUClass {

void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
- void (*tcg_initialize)(void);

const char *deprecation_note;
/* Keep non-pointer data at the end to minimize holes. */
int gdb_num_core_regs;
bool gdb_stop_before_watchpoint;
+
+ TcgCpuOperations tcg_ops;
};

/*
diff --git a/cpu.c b/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/cpu.c
+++ b/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_exec_initfn(CPUState *cpu)
void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
+#ifdef CONFIG_TCG
static bool tcg_target_initialized;
+#endif /* CONFIG_TCG */

cpu_list_add(cpu);

+#ifdef CONFIG_TCG
if (tcg_enabled() && !tcg_target_initialized) {
tcg_target_initialized = true;
- cc->tcg_initialize();
+ cc->tcg_ops.initialize();
}
+#endif /* CONFIG_TCG */
tlb_init(cpu);

qemu_plugin_vcpu_init_hook(cpu);
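The message above notes that tcg_ops may eventually become a pointer. The difference, sketched out of line (editorial illustration in C; only TcgCpuOperations and the embedded form come from this patch, while the pointer form is an assumption about a possible future shape):

    typedef struct TcgCpuOperations {
        void (*initialize)(void);   /* from the patch: run once, at first CPU realize */
    } TcgCpuOperations;

    /* This series: embedded struct, so existing assignments convert 1:1,
     * e.g. cc->tcg_initialize = f becomes cc->tcg_ops.initialize = f. */
    struct CPUClassEmbedded {
        TcgCpuOperations tcg_ops;
    };

    /* Hypothetical later form: a pointer would let each target point at
     * one shared static const ops table instead of filling in fields
     * per class at class_init time. */
    struct CPUClassPointer {
        const TcgCpuOperations *tcg_ops;
    };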
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
dc->vmsd = &vmstate_alpha_cpu;
#endif
cc->disas_set_info = alpha_cpu_disas_set_info;
- cc->tcg_initialize = alpha_translate_init;
+ cc->tcg_ops.initialize = alpha_translate_init;

cc->gdb_num_core_regs = 67;
}
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_stop_before_watchpoint = true;
cc->disas_set_info = arm_disas_set_info;
#ifdef CONFIG_TCG
- cc->tcg_initialize = arm_translate_init;
+ cc->tcg_ops.initialize = arm_translate_init;
cc->tlb_fill = arm_cpu_tlb_fill;
cc->debug_excp_handler = arm_debug_excp_handler;
cc->debug_check_watchpoint = arm_debug_check_watchpoint;
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
cc->tlb_fill = avr_cpu_tlb_fill;
cc->vmsd = &vms_avr_cpu;
cc->disas_set_info = avr_cpu_disas_set_info;
- cc->tcg_initialize = avr_cpu_tcg_init;
+ cc->tcg_ops.initialize = avr_cpu_tcg_init;
cc->synchronize_from_tb = avr_cpu_synchronize_from_tb;
cc->gdb_read_register = avr_cpu_gdb_read_register;
cc->gdb_write_register = avr_cpu_gdb_write_register;
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/cpu.c
+++ b/target/cris/cpu.c
@@ -XXX,XX +XXX,XX @@ static void crisv8_cpu_class_init(ObjectClass *oc, void *data)
ccc->vr = 8;
cc->do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
- cc->tcg_initialize = cris_initialize_crisv10_tcg;
+ cc->tcg_ops.initialize = cris_initialize_crisv10_tcg;
}

static void crisv9_cpu_class_init(ObjectClass *oc, void *data)
@@ -XXX,XX +XXX,XX @@ static void crisv9_cpu_class_init(ObjectClass *oc, void *data)
ccc->vr = 9;
cc->do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
- cc->tcg_initialize = cris_initialize_crisv10_tcg;
+ cc->tcg_ops.initialize = cris_initialize_crisv10_tcg;
}

static void crisv10_cpu_class_init(ObjectClass *oc, void *data)
@@ -XXX,XX +XXX,XX @@ static void crisv10_cpu_class_init(ObjectClass *oc, void *data)
ccc->vr = 10;
cc->do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
- cc->tcg_initialize = cris_initialize_crisv10_tcg;
+ cc->tcg_ops.initialize = cris_initialize_crisv10_tcg;
}

static void crisv11_cpu_class_init(ObjectClass *oc, void *data)
@@ -XXX,XX +XXX,XX @@ static void crisv11_cpu_class_init(ObjectClass *oc, void *data)
ccc->vr = 11;
cc->do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
- cc->tcg_initialize = cris_initialize_crisv10_tcg;
+ cc->tcg_ops.initialize = cris_initialize_crisv10_tcg;
}

static void crisv17_cpu_class_init(ObjectClass *oc, void *data)
@@ -XXX,XX +XXX,XX @@ static void crisv17_cpu_class_init(ObjectClass *oc, void *data)
ccc->vr = 17;
cc->do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
- cc->tcg_initialize = cris_initialize_crisv10_tcg;
+ cc->tcg_ops.initialize = cris_initialize_crisv10_tcg;
}

static void crisv32_cpu_class_init(ObjectClass *oc, void *data)
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_stop_before_watchpoint = true;

cc->disas_set_info = cris_disas_set_info;
- cc->tcg_initialize = cris_initialize_tcg;
+ cc->tcg_ops.initialize = cris_initialize_tcg;
}

#define DEFINE_CRIS_CPU_TYPE(cpu_model, initfn) \
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
#endif
cc->do_unaligned_access = hppa_cpu_do_unaligned_access;
cc->disas_set_info = hppa_cpu_disas_set_info;
- cc->tcg_initialize = hppa_translate_init;
+ cc->tcg_ops.initialize = hppa_translate_init;

cc->gdb_num_core_regs = 128;
}
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ void tcg_cpu_common_class_init(CPUClass *cc)
cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
cc->cpu_exec_enter = x86_cpu_exec_enter;
cc->cpu_exec_exit = x86_cpu_exec_exit;
- cc->tcg_initialize = tcg_x86_init;
+ cc->tcg_ops.initialize = tcg_x86_init;
cc->tlb_fill = x86_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->debug_excp_handler = breakpoint_handler;
diff --git a/target/lm32/cpu.c b/target/lm32/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/lm32/cpu.c
+++ b/target/lm32/cpu.c
@@ -XXX,XX +XXX,XX @@ static void lm32_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_stop_before_watchpoint = true;
cc->debug_excp_handler = lm32_debug_excp_handler;
cc->disas_set_info = lm32_cpu_disas_set_info;
- cc->tcg_initialize = lm32_translate_init;
+ cc->tcg_ops.initialize = lm32_translate_init;
}

#define DEFINE_LM32_CPU_TYPE(cpu_model, initfn) \
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
dc->vmsd = &vmstate_m68k_cpu;
#endif
cc->disas_set_info = m68k_cpu_disas_set_info;
- cc->tcg_initialize = m68k_tcg_init;
+ cc->tcg_ops.initialize = m68k_tcg_init;

cc->gdb_num_core_regs = 18;
}
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_num_core_regs = 32 + 27;

cc->disas_set_info = mb_disas_set_info;
- cc->tcg_initialize = mb_tcg_init;
+ cc->tcg_ops.initialize = mb_tcg_init;
}

static const TypeInfo mb_cpu_type_info = {
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
#endif
cc->disas_set_info = mips_cpu_disas_set_info;
#ifdef CONFIG_TCG
- cc->tcg_initialize = mips_tcg_init;
+ cc->tcg_ops.initialize = mips_tcg_init;
cc->tlb_fill = mips_cpu_tlb_fill;
#endif

diff --git a/target/moxie/cpu.c b/target/moxie/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/moxie/cpu.c
+++ b/target/moxie/cpu.c
@@ -XXX,XX +XXX,XX @@ static void moxie_cpu_class_init(ObjectClass *oc, void *data)
cc->vmsd = &vmstate_moxie_cpu;
#endif
cc->disas_set_info = moxie_cpu_disas_set_info;
- cc->tcg_initialize = moxie_translate_init;
+ cc->tcg_ops.initialize = moxie_translate_init;
}

static void moxielite_initfn(Object *obj)
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -XXX,XX +XXX,XX @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_read_register = nios2_cpu_gdb_read_register;
cc->gdb_write_register = nios2_cpu_gdb_write_register;
cc->gdb_num_core_regs = 49;
- cc->tcg_initialize = nios2_tcg_init;
+ cc->tcg_ops.initialize = nios2_tcg_init;
}

static const TypeInfo nios2_cpu_type_info = {
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
dc->vmsd = &vmstate_openrisc_cpu;
#endif
cc->gdb_num_core_regs = 32 + 3;
- cc->tcg_initialize = openrisc_translate_init;
+ cc->tcg_ops.initialize = openrisc_translate_init;
cc->disas_set_info = openrisc_disas_set_info;
}

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
cc->gdb_arch_name = riscv_gdb_arch_name;
cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
#ifdef CONFIG_TCG
- cc->tcg_initialize = riscv_translate_init;
+ cc->tcg_ops.initialize = riscv_translate_init;
cc->tlb_fill = riscv_cpu_tlb_fill;
#endif
device_class_set_props(dc, riscv_cpu_properties);
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)
cc->gdb_write_register = rx_cpu_gdb_write_register;
cc->get_phys_page_debug = rx_cpu_get_phys_page_debug;
cc->disas_set_info = rx_cpu_disas_set_info;
- cc->tcg_initialize = rx_translate_init;
+ cc->tcg_ops.initialize = rx_translate_init;
cc->tlb_fill = rx_cpu_tlb_fill;

cc->gdb_num_core_regs = 26;
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
#endif
cc->disas_set_info = s390_cpu_disas_set_info;
#ifdef CONFIG_TCG
- cc->tcg_initialize = s390x_translate_init;
+ cc->tcg_ops.initialize = s390x_translate_init;
cc->tlb_fill = s390_cpu_tlb_fill;
#endif

diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
#endif
cc->disas_set_info = superh_cpu_disas_set_info;
- cc->tcg_initialize = sh4_translate_init;
+ cc->tcg_ops.initialize = sh4_translate_init;

cc->gdb_num_core_regs = 59;

diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
cc->vmsd = &vmstate_sparc_cpu;
#endif
cc->disas_set_info = cpu_sparc_disas_set_info;
- cc->tcg_initialize = sparc_tcg_init;
+ cc->tcg_ops.initialize = sparc_tcg_init;

#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
cc->gdb_num_core_regs = 86;
diff --git a/target/tilegx/cpu.c b/target/tilegx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tilegx/cpu.c
+++ b/target/tilegx/cpu.c
@@ -XXX,XX +XXX,XX @@ static void tilegx_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = tilegx_cpu_set_pc;
cc->tlb_fill = tilegx_cpu_tlb_fill;
cc->gdb_num_core_regs = 0;
- cc->tcg_initialize = tilegx_tcg_init;
+ cc->tcg_ops.initialize = tilegx_tcg_init;
}

static const TypeInfo tilegx_cpu_type_info = {
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)
cc->set_pc = tricore_cpu_set_pc;
cc->synchronize_from_tb = tricore_cpu_synchronize_from_tb;
cc->get_phys_page_debug = tricore_cpu_get_phys_page_debug;
- cc->tcg_initialize = tricore_tcg_init;
+ cc->tcg_ops.initialize = tricore_tcg_init;
cc->tlb_fill = tricore_cpu_tlb_fill;
}

diff --git a/target/unicore32/cpu.c b/target/unicore32/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/unicore32/cpu.c
+++ b/target/unicore32/cpu.c
@@ -XXX,XX +XXX,XX @@ static void uc32_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = uc32_cpu_set_pc;
cc->tlb_fill = uc32_cpu_tlb_fill;
cc->get_phys_page_debug = uc32_cpu_get_phys_page_debug;
- cc->tcg_initialize = uc32_translate_init;
+ cc->tcg_ops.initialize = uc32_translate_init;
dc->vmsd = &vmstate_uc32_cpu;
}

diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
#endif
cc->debug_excp_handler = xtensa_breakpoint_handler;
cc->disas_set_info = xtensa_cpu_disas_set_info;
- cc->tcg_initialize = xtensa_translate_init;
+ cc->tcg_ops.initialize = xtensa_translate_init;
dc->vmsd = &vmstate_xtensa_cpu;
}

diff --git a/target/ppc/translate_init.c.inc b/target/ppc/translate_init.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate_init.c.inc
+++ b/target/ppc/translate_init.c.inc
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->virtio_is_big_endian = ppc_cpu_is_big_endian;
#endif
#ifdef CONFIG_TCG
- cc->tcg_initialize = ppc_translate_init;
+ cc->tcg_ops.initialize = ppc_translate_init;
cc->tlb_fill = ppc_cpu_tlb_fill;
#endif
#ifndef CONFIG_USER_ONLY
--
2.25.1

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static void finish_ebb(OptContext *ctx)
remove_mem_copy_all(ctx);
}

-static void finish_folding(OptContext *ctx, TCGOp *op)
+static bool finish_folding(OptContext *ctx, TCGOp *op)
{
const TCGOpDef *def = &tcg_op_defs[op->opc];
int i, nb_oargs;
@@ -XXX,XX +XXX,XX @@ static void finish_folding(OptContext *ctx, TCGOp *op)
ts_info(ts)->z_mask = ctx->z_mask;
}
}
+ return true;
}

/*
@@ -XXX,XX +XXX,XX @@ static bool fold_add(OptContext *ctx, TCGOp *op)
fold_xi_to_x(ctx, op, 0)) {
return true;
}
- return false;
+ return finish_folding(ctx, op);
}

/* We cannot as yet do_constant_folding with vectors. */
@@ -XXX,XX +XXX,XX @@ static bool fold_add_vec(OptContext *ctx, TCGOp *op)
fold_xi_to_x(ctx, op, 0)) {
return true;
}
- return false;
+ return finish_folding(ctx, op);
}

static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
@@ -XXX,XX +XXX,XX @@ static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
op->args[4] = arg_new_constant(ctx, bl);
op->args[5] = arg_new_constant(ctx, bh);
}
- return false;
+ return finish_folding(ctx, op);
}

static bool fold_add2(OptContext *ctx, TCGOp *op)
--
2.43.0
From: Claudio Fontana <cfontana@suse.de>

This will allow us to centralize the registration of
the cpus.c module accelerator operations (in accel/accel-softmmu.c),
and trigger it automatically using object hierarchy lookup from the
new accel_init_interfaces() initialization step, depending just on
which accelerators are available in the code.

Rename all tcg-cpus.c, kvm-cpus.c, etc to tcg-accel-ops.c,
kvm-accel-ops.c, etc, matching the object type names.

Signed-off-by: Claudio Fontana <cfontana@suse.de>
Message-Id: <20210204163931.7358-18-cfontana@suse.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/accel-softmmu.h | 15 ++++++
 accel/kvm/kvm-cpus.h | 2 -
 ...g-cpus-icount.h => tcg-accel-ops-icount.h} | 2 +
 accel/tcg/tcg-accel-ops-mttcg.h | 19 ++++++++
 .../tcg/{tcg-cpus-rr.h => tcg-accel-ops-rr.h} | 0
 accel/tcg/{tcg-cpus.h => tcg-accel-ops.h} | 6 +--
 include/qemu/accel.h | 2 +
 include/sysemu/accel-ops.h | 45 ++++++++++++++++++
 include/sysemu/cpus.h | 26 ++--------
 .../i386/hax/{hax-cpus.h => hax-accel-ops.h} | 2 -
 target/i386/hax/hax-windows.h | 2 +-
 .../i386/hvf/{hvf-cpus.h => hvf-accel-ops.h} | 2 -
 .../whpx/{whpx-cpus.h => whpx-accel-ops.h} | 2 -
 accel/accel-common.c | 11 +++++
 accel/accel-softmmu.c | 44 +++++++++++++++--
 accel/kvm/{kvm-cpus.c => kvm-accel-ops.c} | 28 ++++++++---
 accel/kvm/kvm-all.c | 2 -
 accel/qtest/qtest.c | 23 ++++++---
 ...g-cpus-icount.c => tcg-accel-ops-icount.c} | 21 +++------
 ...tcg-cpus-mttcg.c => tcg-accel-ops-mttcg.c} | 14 ++----
 .../tcg/{tcg-cpus-rr.c => tcg-accel-ops-rr.c} | 13 ++---
 accel/tcg/{tcg-cpus.c => tcg-accel-ops.c} | 47 ++++++++++++++++++-
 accel/tcg/tcg-all.c | 12 -----
 accel/xen/xen-all.c | 24 ++++++----
 bsd-user/main.c | 3 +-
 linux-user/main.c | 1 +
 softmmu/cpus.c | 12 ++---
 softmmu/vl.c | 7 ++-
 .../i386/hax/{hax-cpus.c => hax-accel-ops.c} | 33 +++++++++----
 target/i386/hax/hax-all.c | 5 +-
 target/i386/hax/hax-mem.c | 2 +-
 target/i386/hax/hax-posix.c | 2 +-
 target/i386/hax/hax-windows.c | 2 +-
 .../i386/hvf/{hvf-cpus.c => hvf-accel-ops.c} | 29 +++++++++---
 target/i386/hvf/hvf.c | 3 +-
 target/i386/hvf/x86hvf.c | 2 +-
 .../whpx/{whpx-cpus.c => whpx-accel-ops.c} | 33 +++++++++----
 target/i386/whpx/whpx-all.c | 7 +--
 MAINTAINERS | 3 +-
 accel/kvm/meson.build | 2 +-
 accel/tcg/meson.build | 8 ++--
 target/i386/hax/meson.build | 2 +-
 target/i386/hvf/meson.build | 2 +-
 target/i386/whpx/meson.build | 2 +-
 44 files changed, 361 insertions(+), 163 deletions(-)
 create mode 100644 accel/accel-softmmu.h
 rename accel/tcg/{tcg-cpus-icount.h => tcg-accel-ops-icount.h} (88%)
 create mode 100644 accel/tcg/tcg-accel-ops-mttcg.h
 rename accel/tcg/{tcg-cpus-rr.h => tcg-accel-ops-rr.h} (100%)
 rename accel/tcg/{tcg-cpus.h => tcg-accel-ops.h} (72%)
 create mode 100644 include/sysemu/accel-ops.h
 rename target/i386/hax/{hax-cpus.h => hax-accel-ops.h} (95%)
 rename target/i386/hvf/{hvf-cpus.h => hvf-accel-ops.h} (94%)
 rename target/i386/whpx/{whpx-cpus.h => whpx-accel-ops.h} (96%)
 rename accel/kvm/{kvm-cpus.c => kvm-accel-ops.c} (72%)
 rename accel/tcg/{tcg-cpus-icount.c => tcg-accel-ops-icount.c} (89%)
 rename accel/tcg/{tcg-cpus-mttcg.c => tcg-accel-ops-mttcg.c} (92%)
 rename accel/tcg/{tcg-cpus-rr.c => tcg-accel-ops-rr.c} (97%)
 rename accel/tcg/{tcg-cpus.c => tcg-accel-ops.c} (63%)
 rename target/i386/hax/{hax-cpus.c => hax-accel-ops.c} (69%)
 rename target/i386/hvf/{hvf-cpus.c => hvf-accel-ops.c} (84%)
 rename target/i386/whpx/{whpx-cpus.c => whpx-accel-ops.c} (71%)
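
A minimal sketch of the registration pattern this patch introduces, using
a hypothetical "foo" accelerator (foo_start_vcpu_thread is assumed); the
concrete kvm/tcg/xen versions appear in the hunks below:

    static void foo_accel_ops_class_init(ObjectClass *oc, void *data)
    {
        AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

        /* create_vcpu_thread is the single mandatory hook. */
        ops->create_vcpu_thread = foo_start_vcpu_thread;
    }

    static const TypeInfo foo_accel_ops_type = {
        .name = ACCEL_OPS_NAME("foo"),   /* expands to "foo-accel-ops" */
        .parent = TYPE_ACCEL_OPS,
        .class_init = foo_accel_ops_class_init,
        .abstract = true,
    };

accel_init_ops_interfaces() then finds this class by its QOM name at run
time, so the accelerator no longer calls cpus_register_accel() itself.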
diff --git a/accel/accel-softmmu.h b/accel/accel-softmmu.h
80
new file mode 100644
81
index XXXXXXX..XXXXXXX
82
--- /dev/null
83
+++ b/accel/accel-softmmu.h
84
@@ -XXX,XX +XXX,XX @@
85
+/*
86
+ * QEMU System Emulation accel internal functions
87
+ *
88
+ * Copyright 2021 SUSE LLC
89
+ *
90
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
91
+ * See the COPYING file in the top-level directory.
92
+ */
93
+
94
+#ifndef ACCEL_SOFTMMU_H
95
+#define ACCEL_SOFTMMU_H
96
+
97
+void accel_init_ops_interfaces(AccelClass *ac);
98
+
99
+#endif /* ACCEL_SOFTMMU_H */
100
diff --git a/accel/kvm/kvm-cpus.h b/accel/kvm/kvm-cpus.h
101
index XXXXXXX..XXXXXXX 100644
102
--- a/accel/kvm/kvm-cpus.h
103
+++ b/accel/kvm/kvm-cpus.h
104
@@ -XXX,XX +XXX,XX @@
105
106
#include "sysemu/cpus.h"
107
108
-extern const CpusAccel kvm_cpus;
109
-
110
int kvm_init_vcpu(CPUState *cpu, Error **errp);
111
int kvm_cpu_exec(CPUState *cpu);
112
void kvm_destroy_vcpu(CPUState *cpu);
113
diff --git a/accel/tcg/tcg-cpus-icount.h b/accel/tcg/tcg-accel-ops-icount.h
114
similarity index 88%
115
rename from accel/tcg/tcg-cpus-icount.h
116
rename to accel/tcg/tcg-accel-ops-icount.h
117
index XXXXXXX..XXXXXXX 100644
118
--- a/accel/tcg/tcg-cpus-icount.h
119
+++ b/accel/tcg/tcg-accel-ops-icount.h
120
@@ -XXX,XX +XXX,XX @@ void icount_handle_deadline(void);
121
void icount_prepare_for_run(CPUState *cpu);
122
void icount_process_data(CPUState *cpu);
123
124
+void icount_handle_interrupt(CPUState *cpu, int mask);
125
+
126
#endif /* TCG_CPUS_ICOUNT_H */
127
diff --git a/accel/tcg/tcg-accel-ops-mttcg.h b/accel/tcg/tcg-accel-ops-mttcg.h
128
new file mode 100644
129
index XXXXXXX..XXXXXXX
130
--- /dev/null
131
+++ b/accel/tcg/tcg-accel-ops-mttcg.h
132
@@ -XXX,XX +XXX,XX @@
133
+/*
134
+ * QEMU TCG Multi Threaded vCPUs implementation
135
+ *
136
+ * Copyright 2021 SUSE LLC
137
+ *
138
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
139
+ * See the COPYING file in the top-level directory.
140
+ */
141
+
142
+#ifndef TCG_CPUS_MTTCG_H
143
+#define TCG_CPUS_MTTCG_H
144
+
145
+/* kick MTTCG vCPU thread */
146
+void mttcg_kick_vcpu_thread(CPUState *cpu);
147
+
148
+/* start an mttcg vCPU thread */
149
+void mttcg_start_vcpu_thread(CPUState *cpu);
150
+
151
+#endif /* TCG_CPUS_MTTCG_H */
152
diff --git a/accel/tcg/tcg-cpus-rr.h b/accel/tcg/tcg-accel-ops-rr.h
153
similarity index 100%
154
rename from accel/tcg/tcg-cpus-rr.h
155
rename to accel/tcg/tcg-accel-ops-rr.h
156
diff --git a/accel/tcg/tcg-cpus.h b/accel/tcg/tcg-accel-ops.h
157
similarity index 72%
158
rename from accel/tcg/tcg-cpus.h
159
rename to accel/tcg/tcg-accel-ops.h
160
index XXXXXXX..XXXXXXX 100644
161
--- a/accel/tcg/tcg-cpus.h
162
+++ b/accel/tcg/tcg-accel-ops.h
163
@@ -XXX,XX +XXX,XX @@
164
165
#include "sysemu/cpus.h"
166
167
-extern const CpusAccel tcg_cpus_mttcg;
168
-extern const CpusAccel tcg_cpus_icount;
169
-extern const CpusAccel tcg_cpus_rr;
170
-
171
void tcg_cpus_destroy(CPUState *cpu);
172
int tcg_cpus_exec(CPUState *cpu);
173
-void tcg_cpus_handle_interrupt(CPUState *cpu, int mask);
174
+void tcg_handle_interrupt(CPUState *cpu, int mask);
175
176
#endif /* TCG_CPUS_H */
177
diff --git a/include/qemu/accel.h b/include/qemu/accel.h
178
index XXXXXXX..XXXXXXX 100644
179
--- a/include/qemu/accel.h
180
+++ b/include/qemu/accel.h
181
@@ -XXX,XX +XXX,XX @@ typedef struct AccelClass {
182
AccelClass *accel_find(const char *opt_name);
183
AccelState *current_accel(void);
184
185
+void accel_init_interfaces(AccelClass *ac);
186
+
187
#ifndef CONFIG_USER_ONLY
188
int accel_init_machine(AccelState *accel, MachineState *ms);
189
190
diff --git a/include/sysemu/accel-ops.h b/include/sysemu/accel-ops.h
191
new file mode 100644
192
index XXXXXXX..XXXXXXX
193
--- /dev/null
194
+++ b/include/sysemu/accel-ops.h
195
@@ -XXX,XX +XXX,XX @@
196
+/*
197
+ * Accelerator OPS, used for cpus.c module
198
+ *
199
+ * Copyright 2021 SUSE LLC
200
+ *
201
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
202
+ * See the COPYING file in the top-level directory.
203
+ */
204
+
205
+#ifndef ACCEL_OPS_H
206
+#define ACCEL_OPS_H
207
+
208
+#include "qom/object.h"
209
+
210
+#define ACCEL_OPS_SUFFIX "-ops"
211
+#define TYPE_ACCEL_OPS "accel" ACCEL_OPS_SUFFIX
212
+#define ACCEL_OPS_NAME(name) (name "-" TYPE_ACCEL_OPS)
213
+
214
+typedef struct AccelOpsClass AccelOpsClass;
215
+DECLARE_CLASS_CHECKERS(AccelOpsClass, ACCEL_OPS, TYPE_ACCEL_OPS)
216
+
217
+/* cpus.c operations interface */
218
+struct AccelOpsClass {
219
+ /*< private >*/
220
+ ObjectClass parent_class;
221
+ /*< public >*/
222
+
223
+ /* initialization function called when accel is chosen */
224
+ void (*ops_init)(AccelOpsClass *ops);
225
+
226
+ void (*create_vcpu_thread)(CPUState *cpu); /* MANDATORY NON-NULL */
227
+ void (*kick_vcpu_thread)(CPUState *cpu);
228
+
229
+ void (*synchronize_post_reset)(CPUState *cpu);
230
+ void (*synchronize_post_init)(CPUState *cpu);
231
+ void (*synchronize_state)(CPUState *cpu);
232
+ void (*synchronize_pre_loadvm)(CPUState *cpu);
233
+
234
+ void (*handle_interrupt)(CPUState *cpu, int mask);
235
+
236
+ int64_t (*get_virtual_clock)(void);
237
+ int64_t (*get_elapsed_ticks)(void);
238
+};
239
+
240
+#endif /* ACCEL_OPS_H */
241
diff --git a/include/sysemu/cpus.h b/include/sysemu/cpus.h
242
index XXXXXXX..XXXXXXX 100644
243
--- a/include/sysemu/cpus.h
244
+++ b/include/sysemu/cpus.h
245
@@ -XXX,XX +XXX,XX @@
246
#define QEMU_CPUS_H
247
248
#include "qemu/timer.h"
249
+#include "sysemu/accel-ops.h"
250
251
-/* cpus.c */
252
+/* register accel-specific operations */
253
+void cpus_register_accel(const AccelOpsClass *i);
254
255
-/* CPU execution threads */
256
+/* accel/dummy-cpus.c */
257
258
-typedef struct CpusAccel {
259
- void (*create_vcpu_thread)(CPUState *cpu); /* MANDATORY */
260
- void (*kick_vcpu_thread)(CPUState *cpu);
261
-
262
- void (*synchronize_post_reset)(CPUState *cpu);
263
- void (*synchronize_post_init)(CPUState *cpu);
264
- void (*synchronize_state)(CPUState *cpu);
265
- void (*synchronize_pre_loadvm)(CPUState *cpu);
266
-
267
- void (*handle_interrupt)(CPUState *cpu, int mask);
268
-
269
- int64_t (*get_virtual_clock)(void);
270
- int64_t (*get_elapsed_ticks)(void);
271
-} CpusAccel;
272
-
273
-/* register accel-specific cpus interface implementation */
274
-void cpus_register_accel(const CpusAccel *i);
275
-
276
-/* Create a dummy vcpu for CpusAccel->create_vcpu_thread */
277
+/* Create a dummy vcpu for AccelOpsClass->create_vcpu_thread */
278
void dummy_start_vcpu_thread(CPUState *);
279
280
/* interface available for cpus accelerator threads */
281
diff --git a/target/i386/hax/hax-cpus.h b/target/i386/hax/hax-accel-ops.h
282
similarity index 95%
283
rename from target/i386/hax/hax-cpus.h
284
rename to target/i386/hax/hax-accel-ops.h
285
index XXXXXXX..XXXXXXX 100644
286
--- a/target/i386/hax/hax-cpus.h
287
+++ b/target/i386/hax/hax-accel-ops.h
288
@@ -XXX,XX +XXX,XX @@
289
290
#include "sysemu/cpus.h"
291
292
-extern const CpusAccel hax_cpus;
293
-
294
#include "hax-interface.h"
295
#include "hax-i386.h"
296
297
diff --git a/target/i386/hax/hax-windows.h b/target/i386/hax/hax-windows.h
298
index XXXXXXX..XXXXXXX 100644
299
--- a/target/i386/hax/hax-windows.h
300
+++ b/target/i386/hax/hax-windows.h
301
@@ -XXX,XX +XXX,XX @@
302
#include <winioctl.h>
303
#include <windef.h>
304
305
-#include "hax-cpus.h"
306
+#include "hax-accel-ops.h"
307
308
#define HAX_INVALID_FD INVALID_HANDLE_VALUE
309
310
diff --git a/target/i386/hvf/hvf-cpus.h b/target/i386/hvf/hvf-accel-ops.h
311
similarity index 94%
312
rename from target/i386/hvf/hvf-cpus.h
313
rename to target/i386/hvf/hvf-accel-ops.h
314
index XXXXXXX..XXXXXXX 100644
315
--- a/target/i386/hvf/hvf-cpus.h
316
+++ b/target/i386/hvf/hvf-accel-ops.h
317
@@ -XXX,XX +XXX,XX @@
318
319
#include "sysemu/cpus.h"
320
321
-extern const CpusAccel hvf_cpus;
322
-
323
int hvf_init_vcpu(CPUState *);
324
int hvf_vcpu_exec(CPUState *);
325
void hvf_cpu_synchronize_state(CPUState *);
326
diff --git a/target/i386/whpx/whpx-cpus.h b/target/i386/whpx/whpx-accel-ops.h
327
similarity index 96%
328
rename from target/i386/whpx/whpx-cpus.h
329
rename to target/i386/whpx/whpx-accel-ops.h
330
index XXXXXXX..XXXXXXX 100644
331
--- a/target/i386/whpx/whpx-cpus.h
332
+++ b/target/i386/whpx/whpx-accel-ops.h
333
@@ -XXX,XX +XXX,XX @@
334
335
#include "sysemu/cpus.h"
336
337
-extern const CpusAccel whpx_cpus;
338
-
339
int whpx_init_vcpu(CPUState *cpu);
340
int whpx_vcpu_exec(CPUState *cpu);
341
void whpx_destroy_vcpu(CPUState *cpu);
342
diff --git a/accel/accel-common.c b/accel/accel-common.c
343
index XXXXXXX..XXXXXXX 100644
344
--- a/accel/accel-common.c
345
+++ b/accel/accel-common.c
346
@@ -XXX,XX +XXX,XX @@
347
#include "qemu/osdep.h"
348
#include "qemu/accel.h"
349
350
+#ifndef CONFIG_USER_ONLY
351
+#include "accel-softmmu.h"
352
+#endif /* !CONFIG_USER_ONLY */
353
+
354
static const TypeInfo accel_type = {
355
.name = TYPE_ACCEL,
356
.parent = TYPE_OBJECT,
357
@@ -XXX,XX +XXX,XX @@ AccelClass *accel_find(const char *opt_name)
358
return ac;
359
}
360
361
+void accel_init_interfaces(AccelClass *ac)
362
+{
363
+#ifndef CONFIG_USER_ONLY
364
+ accel_init_ops_interfaces(ac);
365
+#endif /* !CONFIG_USER_ONLY */
366
+}
367
+
368
static void register_accel_types(void)
369
{
370
type_register_static(&accel_type);
371
diff --git a/accel/accel-softmmu.c b/accel/accel-softmmu.c
372
index XXXXXXX..XXXXXXX 100644
373
--- a/accel/accel-softmmu.c
374
+++ b/accel/accel-softmmu.c
375
@@ -XXX,XX +XXX,XX @@
376
#include "qemu/osdep.h"
377
#include "qemu/accel.h"
378
#include "hw/boards.h"
379
-#include "sysemu/arch_init.h"
380
-#include "sysemu/sysemu.h"
381
-#include "qom/object.h"
382
+#include "sysemu/cpus.h"
383
+
384
+#include "accel-softmmu.h"
385
386
int accel_init_machine(AccelState *accel, MachineState *ms)
387
{
388
@@ -XXX,XX +XXX,XX @@ void accel_setup_post(MachineState *ms)
389
acc->setup_post(ms, accel);
390
}
391
}
392
+
393
+/* initialize the arch-independent accel operation interfaces */
394
+void accel_init_ops_interfaces(AccelClass *ac)
395
+{
396
+ const char *ac_name;
397
+ char *ops_name;
398
+ AccelOpsClass *ops;
399
+
400
+ ac_name = object_class_get_name(OBJECT_CLASS(ac));
401
+ g_assert(ac_name != NULL);
402
+
403
+ ops_name = g_strdup_printf("%s" ACCEL_OPS_SUFFIX, ac_name);
404
+ ops = ACCEL_OPS_CLASS(object_class_by_name(ops_name));
405
+ g_free(ops_name);
406
+
407
+ /*
408
+ * all accelerators need to define ops, providing at least a mandatory
409
+ * non-NULL create_vcpu_thread operation.
410
+ */
411
+ g_assert(ops != NULL);
412
+ if (ops->ops_init) {
413
+ ops->ops_init(ops);
414
+ }
415
+ cpus_register_accel(ops);
416
+}
417
+
418
+static const TypeInfo accel_ops_type_info = {
419
+ .name = TYPE_ACCEL_OPS,
420
+ .parent = TYPE_OBJECT,
421
+ .abstract = true,
422
+ .class_size = sizeof(AccelOpsClass),
423
+};
424
+
425
+static void accel_softmmu_register_types(void)
426
+{
427
+ type_register_static(&accel_ops_type_info);
428
+}
429
+type_init(accel_softmmu_register_types);
430
diff --git a/accel/kvm/kvm-cpus.c b/accel/kvm/kvm-accel-ops.c
431
similarity index 72%
432
rename from accel/kvm/kvm-cpus.c
433
rename to accel/kvm/kvm-accel-ops.c
434
index XXXXXXX..XXXXXXX 100644
435
--- a/accel/kvm/kvm-cpus.c
436
+++ b/accel/kvm/kvm-accel-ops.c
437
@@ -XXX,XX +XXX,XX @@ static void kvm_start_vcpu_thread(CPUState *cpu)
438
cpu, QEMU_THREAD_JOINABLE);
439
}
440
441
-const CpusAccel kvm_cpus = {
442
- .create_vcpu_thread = kvm_start_vcpu_thread,
443
+static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
444
+{
445
+ AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
446
447
- .synchronize_post_reset = kvm_cpu_synchronize_post_reset,
448
- .synchronize_post_init = kvm_cpu_synchronize_post_init,
449
- .synchronize_state = kvm_cpu_synchronize_state,
450
- .synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm,
451
+ ops->create_vcpu_thread = kvm_start_vcpu_thread;
452
+ ops->synchronize_post_reset = kvm_cpu_synchronize_post_reset;
453
+ ops->synchronize_post_init = kvm_cpu_synchronize_post_init;
454
+ ops->synchronize_state = kvm_cpu_synchronize_state;
455
+ ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;
456
+}
457
+
458
+static const TypeInfo kvm_accel_ops_type = {
459
+ .name = ACCEL_OPS_NAME("kvm"),
460
+
461
+ .parent = TYPE_ACCEL_OPS,
462
+ .class_init = kvm_accel_ops_class_init,
463
+ .abstract = true,
464
};
465
+
466
+static void kvm_accel_ops_register_types(void)
467
+{
468
+ type_register_static(&kvm_accel_ops_type);
469
+}
470
+type_init(kvm_accel_ops_register_types);
471
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
472
index XXXXXXX..XXXXXXX 100644
473
--- a/accel/kvm/kvm-all.c
474
+++ b/accel/kvm/kvm-all.c
475
@@ -XXX,XX +XXX,XX @@ static int kvm_init(MachineState *ms)
476
ret = ram_block_discard_disable(true);
477
assert(!ret);
478
}
479
-
480
- cpus_register_accel(&kvm_cpus);
481
return 0;
482
483
err:
484
diff --git a/accel/qtest/qtest.c b/accel/qtest/qtest.c
485
index XXXXXXX..XXXXXXX 100644
486
--- a/accel/qtest/qtest.c
487
+++ b/accel/qtest/qtest.c
488
@@ -XXX,XX +XXX,XX @@
489
#include "qemu/main-loop.h"
490
#include "hw/core/cpu.h"
491
492
-const CpusAccel qtest_cpus = {
493
- .create_vcpu_thread = dummy_start_vcpu_thread,
494
- .get_virtual_clock = qtest_get_virtual_clock,
495
-};
496
-
497
static int qtest_init_accel(MachineState *ms)
498
{
499
- cpus_register_accel(&qtest_cpus);
500
return 0;
501
}
502
503
@@ -XXX,XX +XXX,XX @@ static const TypeInfo qtest_accel_type = {
504
.class_init = qtest_accel_class_init,
505
};
506
507
+static void qtest_accel_ops_class_init(ObjectClass *oc, void *data)
508
+{
509
+ AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
510
+
511
+ ops->create_vcpu_thread = dummy_start_vcpu_thread;
512
+ ops->get_virtual_clock = qtest_get_virtual_clock;
513
+};
514
+
515
+static const TypeInfo qtest_accel_ops_type = {
516
+ .name = ACCEL_OPS_NAME("qtest"),
517
+
518
+ .parent = TYPE_ACCEL_OPS,
519
+ .class_init = qtest_accel_ops_class_init,
520
+ .abstract = true,
521
+};
522
+
523
static void qtest_type_init(void)
524
{
525
type_register_static(&qtest_accel_type);
526
+ type_register_static(&qtest_accel_ops_type);
527
}
528
529
type_init(qtest_type_init);
530
diff --git a/accel/tcg/tcg-cpus-icount.c b/accel/tcg/tcg-accel-ops-icount.c
531
similarity index 89%
532
rename from accel/tcg/tcg-cpus-icount.c
533
rename to accel/tcg/tcg-accel-ops-icount.c
534
index XXXXXXX..XXXXXXX 100644
535
--- a/accel/tcg/tcg-cpus-icount.c
536
+++ b/accel/tcg/tcg-accel-ops-icount.c
537
@@ -XXX,XX +XXX,XX @@
538
#include "exec/exec-all.h"
539
#include "hw/boards.h"
540
541
-#include "tcg-cpus.h"
542
-#include "tcg-cpus-icount.h"
543
-#include "tcg-cpus-rr.h"
544
+#include "tcg-accel-ops.h"
545
+#include "tcg-accel-ops-icount.h"
546
+#include "tcg-accel-ops-rr.h"
547
548
static int64_t icount_get_limit(void)
549
{
550
@@ -XXX,XX +XXX,XX @@ void icount_prepare_for_run(CPUState *cpu)
551
/*
552
* These should always be cleared by icount_process_data after
553
* each vCPU execution. However u16.high can be raised
554
- * asynchronously by cpu_exit/cpu_interrupt/tcg_cpus_handle_interrupt
555
+ * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
556
*/
557
g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
558
g_assert(cpu->icount_extra == 0);
559
@@ -XXX,XX +XXX,XX @@ void icount_process_data(CPUState *cpu)
560
replay_mutex_unlock();
561
}
562
563
-static void icount_handle_interrupt(CPUState *cpu, int mask)
564
+void icount_handle_interrupt(CPUState *cpu, int mask)
565
{
566
int old_mask = cpu->interrupt_request;
567
568
- tcg_cpus_handle_interrupt(cpu, mask);
569
+ tcg_handle_interrupt(cpu, mask);
570
if (qemu_cpu_is_self(cpu) &&
571
!cpu->can_do_io
572
&& (mask & ~old_mask) != 0) {
573
cpu_abort(cpu, "Raised interrupt while not in I/O function");
574
}
575
}
576
-
577
-const CpusAccel tcg_cpus_icount = {
578
- .create_vcpu_thread = rr_start_vcpu_thread,
579
- .kick_vcpu_thread = rr_kick_vcpu_thread,
580
-
581
- .handle_interrupt = icount_handle_interrupt,
582
- .get_virtual_clock = icount_get,
583
- .get_elapsed_ticks = icount_get,
584
-};
585
diff --git a/accel/tcg/tcg-cpus-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
586
similarity index 92%
587
rename from accel/tcg/tcg-cpus-mttcg.c
588
rename to accel/tcg/tcg-accel-ops-mttcg.c
589
index XXXXXXX..XXXXXXX 100644
590
--- a/accel/tcg/tcg-cpus-mttcg.c
591
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
592
@@ -XXX,XX +XXX,XX @@
593
#include "exec/exec-all.h"
594
#include "hw/boards.h"
595
596
-#include "tcg-cpus.h"
597
+#include "tcg-accel-ops.h"
598
+#include "tcg-accel-ops-mttcg.h"
599
600
/*
601
* In the multi-threaded case each vCPU has its own thread. The TLS
602
@@ -XXX,XX +XXX,XX @@ static void *mttcg_cpu_thread_fn(void *arg)
603
return NULL;
604
}
605
606
-static void mttcg_kick_vcpu_thread(CPUState *cpu)
607
+void mttcg_kick_vcpu_thread(CPUState *cpu)
608
{
609
cpu_exit(cpu);
610
}
611
612
-static void mttcg_start_vcpu_thread(CPUState *cpu)
613
+void mttcg_start_vcpu_thread(CPUState *cpu)
614
{
615
char thread_name[VCPU_THREAD_NAME_SIZE];
616
617
@@ -XXX,XX +XXX,XX @@ static void mttcg_start_vcpu_thread(CPUState *cpu)
618
cpu->hThread = qemu_thread_get_handle(cpu->thread);
619
#endif
620
}
621
-
622
-const CpusAccel tcg_cpus_mttcg = {
623
- .create_vcpu_thread = mttcg_start_vcpu_thread,
624
- .kick_vcpu_thread = mttcg_kick_vcpu_thread,
625
-
626
- .handle_interrupt = tcg_cpus_handle_interrupt,
627
-};
628
diff --git a/accel/tcg/tcg-cpus-rr.c b/accel/tcg/tcg-accel-ops-rr.c
629
similarity index 97%
630
rename from accel/tcg/tcg-cpus-rr.c
631
rename to accel/tcg/tcg-accel-ops-rr.c
632
index XXXXXXX..XXXXXXX 100644
633
--- a/accel/tcg/tcg-cpus-rr.c
634
+++ b/accel/tcg/tcg-accel-ops-rr.c
635
@@ -XXX,XX +XXX,XX @@
636
#include "exec/exec-all.h"
637
#include "hw/boards.h"
638
639
-#include "tcg-cpus.h"
640
-#include "tcg-cpus-rr.h"
641
-#include "tcg-cpus-icount.h"
642
+#include "tcg-accel-ops.h"
643
+#include "tcg-accel-ops-rr.h"
644
+#include "tcg-accel-ops-icount.h"
645
646
/* Kick all RR vCPUs */
647
void rr_kick_vcpu_thread(CPUState *unused)
648
@@ -XXX,XX +XXX,XX @@ void rr_start_vcpu_thread(CPUState *cpu)
649
cpu->created = true;
650
}
651
}
652
-
653
-const CpusAccel tcg_cpus_rr = {
654
- .create_vcpu_thread = rr_start_vcpu_thread,
655
- .kick_vcpu_thread = rr_kick_vcpu_thread,
656
-
657
- .handle_interrupt = tcg_cpus_handle_interrupt,
658
-};
659
diff --git a/accel/tcg/tcg-cpus.c b/accel/tcg/tcg-accel-ops.c
660
similarity index 63%
661
rename from accel/tcg/tcg-cpus.c
662
rename to accel/tcg/tcg-accel-ops.c
663
index XXXXXXX..XXXXXXX 100644
664
--- a/accel/tcg/tcg-cpus.c
665
+++ b/accel/tcg/tcg-accel-ops.c
666
@@ -XXX,XX +XXX,XX @@
667
#include "exec/exec-all.h"
668
#include "hw/boards.h"
669
670
-#include "tcg-cpus.h"
671
+#include "tcg-accel-ops.h"
672
+#include "tcg-accel-ops-mttcg.h"
673
+#include "tcg-accel-ops-rr.h"
674
+#include "tcg-accel-ops-icount.h"
675
676
/* common functionality among all TCG variants */
677
678
@@ -XXX,XX +XXX,XX @@ int tcg_cpus_exec(CPUState *cpu)
679
}
680
681
/* mask must never be zero, except for A20 change call */
682
-void tcg_cpus_handle_interrupt(CPUState *cpu, int mask)
683
+void tcg_handle_interrupt(CPUState *cpu, int mask)
684
{
685
g_assert(qemu_mutex_iothread_locked());
686
687
@@ -XXX,XX +XXX,XX @@ void tcg_cpus_handle_interrupt(CPUState *cpu, int mask)
688
qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
689
}
690
}
691
+
692
+static void tcg_accel_ops_init(AccelOpsClass *ops)
693
+{
694
+ if (qemu_tcg_mttcg_enabled()) {
695
+ ops->create_vcpu_thread = mttcg_start_vcpu_thread;
696
+ ops->kick_vcpu_thread = mttcg_kick_vcpu_thread;
697
+ ops->handle_interrupt = tcg_handle_interrupt;
698
+ } else if (icount_enabled()) {
699
+ ops->create_vcpu_thread = rr_start_vcpu_thread;
700
+ ops->kick_vcpu_thread = rr_kick_vcpu_thread;
701
+ ops->handle_interrupt = icount_handle_interrupt;
702
+ ops->get_virtual_clock = icount_get;
703
+ ops->get_elapsed_ticks = icount_get;
704
+ } else {
705
+ ops->create_vcpu_thread = rr_start_vcpu_thread;
706
+ ops->kick_vcpu_thread = rr_kick_vcpu_thread;
707
+ ops->handle_interrupt = tcg_handle_interrupt;
708
+ }
709
+}
710
+
711
+static void tcg_accel_ops_class_init(ObjectClass *oc, void *data)
712
+{
713
+ AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
714
+
715
+ ops->ops_init = tcg_accel_ops_init;
716
+}
717
+
718
+static const TypeInfo tcg_accel_ops_type = {
719
+ .name = ACCEL_OPS_NAME("tcg"),
720
+
721
+ .parent = TYPE_ACCEL_OPS,
722
+ .class_init = tcg_accel_ops_class_init,
723
+ .abstract = true,
724
+};
725
+
726
+static void tcg_accel_ops_register_types(void)
727
+{
728
+ type_register_static(&tcg_accel_ops_type);
729
+}
730
+type_init(tcg_accel_ops_register_types);
731
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
732
index XXXXXXX..XXXXXXX 100644
733
--- a/accel/tcg/tcg-all.c
734
+++ b/accel/tcg/tcg-all.c
735
@@ -XXX,XX +XXX,XX @@
736
#include "qemu/accel.h"
737
#include "qapi/qapi-builtin-visit.h"
738
739
-#ifndef CONFIG_USER_ONLY
740
-#include "tcg-cpus.h"
741
-#endif /* CONFIG_USER_ONLY */
742
-
743
struct TCGState {
744
AccelState parent_obj;
745
746
@@ -XXX,XX +XXX,XX @@ static int tcg_init(MachineState *ms)
747
*/
748
#ifndef CONFIG_USER_ONLY
749
tcg_region_init();
750
-
751
- if (mttcg_enabled) {
752
- cpus_register_accel(&tcg_cpus_mttcg);
753
- } else if (icount_enabled()) {
754
- cpus_register_accel(&tcg_cpus_icount);
755
- } else {
756
- cpus_register_accel(&tcg_cpus_rr);
757
- }
758
#endif /* !CONFIG_USER_ONLY */
759
760
return 0;
761
diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c
762
index XXXXXXX..XXXXXXX 100644
763
--- a/accel/xen/xen-all.c
764
+++ b/accel/xen/xen-all.c
765
@@ -XXX,XX +XXX,XX @@ static void xen_setup_post(MachineState *ms, AccelState *accel)
766
}
767
}
768
769
-const CpusAccel xen_cpus = {
770
- .create_vcpu_thread = dummy_start_vcpu_thread,
771
-};
772
-
773
static int xen_init(MachineState *ms)
774
{
775
MachineClass *mc = MACHINE_GET_CLASS(ms);
776
@@ -XXX,XX +XXX,XX @@ static int xen_init(MachineState *ms)
777
* opt out of system RAM being allocated by generic code
778
*/
779
mc->default_ram_id = NULL;
780
-
781
- cpus_register_accel(&xen_cpus);
782
-
783
return 0;
784
}
785
786
@@ -XXX,XX +XXX,XX @@ static const TypeInfo xen_accel_type = {
787
.class_init = xen_accel_class_init,
788
};
789
790
+static void xen_accel_ops_class_init(ObjectClass *oc, void *data)
791
+{
792
+ AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
793
+
794
+ ops->create_vcpu_thread = dummy_start_vcpu_thread;
795
+}
796
+
797
+static const TypeInfo xen_accel_ops_type = {
798
+ .name = ACCEL_OPS_NAME("xen"),
799
+
800
+ .parent = TYPE_ACCEL_OPS,
801
+ .class_init = xen_accel_ops_class_init,
802
+ .abstract = true,
803
+};
804
+
805
static void xen_type_init(void)
806
{
807
type_register_static(&xen_accel_type);
808
+ type_register_static(&xen_accel_ops_type);
809
}
810
-
811
type_init(xen_type_init);
812
diff --git a/bsd-user/main.c b/bsd-user/main.c
813
index XXXXXXX..XXXXXXX 100644
814
--- a/bsd-user/main.c
815
+++ b/bsd-user/main.c
816
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
817
#endif
818
}
819
820
+ cpu_type = parse_cpu_option(cpu_model);
821
/* init tcg before creating CPUs and to get qemu_host_page_size */
822
{
823
AccelClass *ac = ACCEL_GET_CLASS(current_accel());
824
825
ac->init_machine(NULL);
826
+ accel_init_interfaces(ac);
827
}
828
- cpu_type = parse_cpu_option(cpu_model);
829
cpu = cpu_create(cpu_type);
830
env = cpu->env_ptr;
831
#if defined(TARGET_SPARC) || defined(TARGET_PPC)
832
diff --git a/linux-user/main.c b/linux-user/main.c
833
index XXXXXXX..XXXXXXX 100644
834
--- a/linux-user/main.c
835
+++ b/linux-user/main.c
836
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
837
AccelClass *ac = ACCEL_GET_CLASS(current_accel());
838
839
ac->init_machine(NULL);
840
+ accel_init_interfaces(ac);
841
}
842
cpu = cpu_create(cpu_type);
843
env = cpu->env_ptr;
844
diff --git a/softmmu/cpus.c b/softmmu/cpus.c
845
index XXXXXXX..XXXXXXX 100644
846
--- a/softmmu/cpus.c
847
+++ b/softmmu/cpus.c
848
@@ -XXX,XX +XXX,XX @@ void hw_error(const char *fmt, ...)
849
/*
850
* The chosen accelerator is supposed to register this.
851
*/
852
-static const CpusAccel *cpus_accel;
853
+static const AccelOpsClass *cpus_accel;
854
855
void cpu_synchronize_all_states(void)
856
{
857
@@ -XXX,XX +XXX,XX @@ void cpu_remove_sync(CPUState *cpu)
858
qemu_mutex_lock_iothread();
859
}
860
861
-void cpus_register_accel(const CpusAccel *ca)
862
+void cpus_register_accel(const AccelOpsClass *ops)
863
{
864
- assert(ca != NULL);
865
- assert(ca->create_vcpu_thread != NULL); /* mandatory */
866
- cpus_accel = ca;
867
+ assert(ops != NULL);
868
+ assert(ops->create_vcpu_thread != NULL); /* mandatory */
869
+ cpus_accel = ops;
870
}
871
872
void qemu_init_vcpu(CPUState *cpu)
873
@@ -XXX,XX +XXX,XX @@ void qemu_init_vcpu(CPUState *cpu)
874
cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
875
}
876
877
- /* accelerators all implement the CpusAccel interface */
878
+ /* accelerators all implement the AccelOpsClass */
879
g_assert(cpus_accel != NULL && cpus_accel->create_vcpu_thread != NULL);
880
cpus_accel->create_vcpu_thread(cpu);
881
882
diff --git a/softmmu/vl.c b/softmmu/vl.c
883
index XXXXXXX..XXXXXXX 100644
884
--- a/softmmu/vl.c
885
+++ b/softmmu/vl.c
886
@@ -XXX,XX +XXX,XX @@ static bool object_create_early(const char *type, QemuOpts *opts)
887
return false;
888
}
889
890
- /* Allocation of large amounts of memory may delay
891
+ /*
892
+ * Allocation of large amounts of memory may delay
893
* chardev initialization for too long, and trigger timeouts
894
* on software that waits for a monitor socket to be created
895
* (e.g. libvirt).
896
@@ -XXX,XX +XXX,XX @@ void qemu_init(int argc, char **argv, char **envp)
897
*
898
* Machine compat properties: object_set_machine_compat_props().
899
* Accelerator compat props: object_set_accelerator_compat_props(),
900
- * called from configure_accelerator().
901
+ * called from do_configure_accelerator().
902
*/
903
904
machine_class = MACHINE_GET_CLASS(current_machine);
905
@@ -XXX,XX +XXX,XX @@ void qemu_init(int argc, char **argv, char **envp)
906
if (cpu_option) {
907
current_machine->cpu_type = parse_cpu_option(cpu_option);
908
}
909
+ /* NB: for machine none cpu_type could STILL be NULL here! */
910
+ accel_init_interfaces(ACCEL_GET_CLASS(current_machine->accelerator));
911
912
qemu_resolve_machine_memdev();
913
parse_numa_opts(current_machine);
914
diff --git a/target/i386/hax/hax-cpus.c b/target/i386/hax/hax-accel-ops.c
915
similarity index 69%
916
rename from target/i386/hax/hax-cpus.c
917
rename to target/i386/hax/hax-accel-ops.c
918
index XXXXXXX..XXXXXXX 100644
919
--- a/target/i386/hax/hax-cpus.c
920
+++ b/target/i386/hax/hax-accel-ops.c
921
@@ -XXX,XX +XXX,XX @@
922
#include "sysemu/cpus.h"
923
#include "qemu/guest-random.h"
924
925
-#include "hax-cpus.h"
926
+#include "hax-accel-ops.h"
927
928
static void *hax_cpu_thread_fn(void *arg)
929
{
930
@@ -XXX,XX +XXX,XX @@ static void hax_start_vcpu_thread(CPUState *cpu)
931
#endif
932
}
933
934
-const CpusAccel hax_cpus = {
935
- .create_vcpu_thread = hax_start_vcpu_thread,
936
- .kick_vcpu_thread = hax_kick_vcpu_thread,
937
+static void hax_accel_ops_class_init(ObjectClass *oc, void *data)
938
+{
939
+ AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
940
941
- .synchronize_post_reset = hax_cpu_synchronize_post_reset,
942
- .synchronize_post_init = hax_cpu_synchronize_post_init,
943
- .synchronize_state = hax_cpu_synchronize_state,
944
- .synchronize_pre_loadvm = hax_cpu_synchronize_pre_loadvm,
945
+ ops->create_vcpu_thread = hax_start_vcpu_thread;
946
+ ops->kick_vcpu_thread = hax_kick_vcpu_thread;
947
+
948
+ ops->synchronize_post_reset = hax_cpu_synchronize_post_reset;
949
+ ops->synchronize_post_init = hax_cpu_synchronize_post_init;
950
+ ops->synchronize_state = hax_cpu_synchronize_state;
951
+ ops->synchronize_pre_loadvm = hax_cpu_synchronize_pre_loadvm;
952
+}
953
+
954
+static const TypeInfo hax_accel_ops_type = {
955
+ .name = ACCEL_OPS_NAME("hax"),
956
+
957
+ .parent = TYPE_ACCEL_OPS,
958
+ .class_init = hax_accel_ops_class_init,
959
+ .abstract = true,
960
};
961
+
962
+static void hax_accel_ops_register_types(void)
963
+{
964
+ type_register_static(&hax_accel_ops_type);
965
+}
966
+type_init(hax_accel_ops_register_types);
967
diff --git a/target/i386/hax/hax-all.c b/target/i386/hax/hax-all.c
968
index XXXXXXX..XXXXXXX 100644
969
--- a/target/i386/hax/hax-all.c
970
+++ b/target/i386/hax/hax-all.c
971
@@ -XXX,XX +XXX,XX @@
972
#include "sysemu/runstate.h"
973
#include "hw/boards.h"
974
975
-#include "hax-cpus.h"
976
+#include "hax-accel-ops.h"
977
978
#define DEBUG_HAX 0
979
980
@@ -XXX,XX +XXX,XX @@ static int hax_accel_init(MachineState *ms)
981
!ret ? "working" : "not working",
982
!ret ? "fast virt" : "emulation");
983
}
984
- if (ret == 0) {
985
- cpus_register_accel(&hax_cpus);
986
- }
987
return ret;
988
}
989
990
diff --git a/target/i386/hax/hax-mem.c b/target/i386/hax/hax-mem.c
991
index XXXXXXX..XXXXXXX 100644
992
--- a/target/i386/hax/hax-mem.c
993
+++ b/target/i386/hax/hax-mem.c
994
@@ -XXX,XX +XXX,XX @@
995
#include "exec/address-spaces.h"
996
#include "qemu/error-report.h"
997
998
-#include "hax-cpus.h"
999
+#include "hax-accel-ops.h"
1000
#include "qemu/queue.h"
1001
1002
#define DEBUG_HAX_MEM 0
1003
diff --git a/target/i386/hax/hax-posix.c b/target/i386/hax/hax-posix.c
1004
index XXXXXXX..XXXXXXX 100644
1005
--- a/target/i386/hax/hax-posix.c
1006
+++ b/target/i386/hax/hax-posix.c
1007
@@ -XXX,XX +XXX,XX @@
1008
#include <sys/ioctl.h>
1009
1010
#include "sysemu/cpus.h"
1011
-#include "hax-cpus.h"
1012
+#include "hax-accel-ops.h"
1013
1014
hax_fd hax_mod_open(void)
1015
{
1016
diff --git a/target/i386/hax/hax-windows.c b/target/i386/hax/hax-windows.c
1017
index XXXXXXX..XXXXXXX 100644
1018
--- a/target/i386/hax/hax-windows.c
1019
+++ b/target/i386/hax/hax-windows.c
1020
@@ -XXX,XX +XXX,XX @@
1021
1022
#include "qemu/osdep.h"
1023
#include "cpu.h"
1024
-#include "hax-cpus.h"
1025
+#include "hax-accel-ops.h"
1026
1027
/*
1028
* return 0 when success, -1 when driver not loaded,
1029
diff --git a/target/i386/hvf/hvf-cpus.c b/target/i386/hvf/hvf-accel-ops.c
1030
similarity index 84%
1031
rename from target/i386/hvf/hvf-cpus.c
1032
rename to target/i386/hvf/hvf-accel-ops.c
1033
index XXXXXXX..XXXXXXX 100644
1034
--- a/target/i386/hvf/hvf-cpus.c
1035
+++ b/target/i386/hvf/hvf-accel-ops.c
1036
@@ -XXX,XX +XXX,XX @@
1037
#include "target/i386/cpu.h"
1038
#include "qemu/guest-random.h"
1039
1040
-#include "hvf-cpus.h"
1041
+#include "hvf-accel-ops.h"
1042
1043
/*
1044
* The HVF-specific vCPU thread function. This one should only run when the host
1045
@@ -XXX,XX +XXX,XX @@ static void hvf_start_vcpu_thread(CPUState *cpu)
1046
cpu, QEMU_THREAD_JOINABLE);
1047
}
1048
1049
-const CpusAccel hvf_cpus = {
1050
- .create_vcpu_thread = hvf_start_vcpu_thread,
1051
+static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
1052
+{
1053
+ AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
1054
1055
- .synchronize_post_reset = hvf_cpu_synchronize_post_reset,
1056
- .synchronize_post_init = hvf_cpu_synchronize_post_init,
1057
- .synchronize_state = hvf_cpu_synchronize_state,
1058
- .synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm,
1059
+ ops->create_vcpu_thread = hvf_start_vcpu_thread;
1060
+
1061
+ ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
1062
+ ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
1063
+ ops->synchronize_state = hvf_cpu_synchronize_state;
1064
+ ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm;
1065
};
1066
+static const TypeInfo hvf_accel_ops_type = {
1067
+ .name = ACCEL_OPS_NAME("hvf"),
1068
+
1069
+ .parent = TYPE_ACCEL_OPS,
1070
+ .class_init = hvf_accel_ops_class_init,
1071
+ .abstract = true,
1072
+};
1073
+static void hvf_accel_ops_register_types(void)
1074
+{
1075
+ type_register_static(&hvf_accel_ops_type);
1076
+}
1077
+type_init(hvf_accel_ops_register_types);
1078
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
1079
index XXXXXXX..XXXXXXX 100644
1080
--- a/target/i386/hvf/hvf.c
1081
+++ b/target/i386/hvf/hvf.c
1082
@@ -XXX,XX +XXX,XX @@
1083
#include "qemu/accel.h"
1084
#include "target/i386/cpu.h"
1085
1086
-#include "hvf-cpus.h"
1087
+#include "hvf-accel-ops.h"
1088
1089
HVFState *hvf_state;
1090
1091
@@ -XXX,XX +XXX,XX @@ static int hvf_accel_init(MachineState *ms)
1092
1093
hvf_state = s;
1094
memory_listener_register(&hvf_memory_listener, &address_space_memory);
1095
- cpus_register_accel(&hvf_cpus);
1096
return 0;
1097
}
1098
1099
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
1100
index XXXXXXX..XXXXXXX 100644
1101
--- a/target/i386/hvf/x86hvf.c
1102
+++ b/target/i386/hvf/x86hvf.c
1103
@@ -XXX,XX +XXX,XX @@
1104
#include <Hypervisor/hv.h>
1105
#include <Hypervisor/hv_vmx.h>
1106
1107
-#include "hvf-cpus.h"
1108
+#include "hvf-accel-ops.h"
1109
1110
void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
1111
SegmentCache *qseg, bool is_tr)
1112
diff --git a/target/i386/whpx/whpx-cpus.c b/target/i386/whpx/whpx-accel-ops.c
1113
similarity index 71%
1114
rename from target/i386/whpx/whpx-cpus.c
1115
rename to target/i386/whpx/whpx-accel-ops.c
1116
index XXXXXXX..XXXXXXX 100644
1117
--- a/target/i386/whpx/whpx-cpus.c
1118
+++ b/target/i386/whpx/whpx-accel-ops.c
1119
@@ -XXX,XX +XXX,XX @@
1120
1121
#include "sysemu/whpx.h"
1122
#include "whpx-internal.h"
1123
-#include "whpx-cpus.h"
1124
+#include "whpx-accel-ops.h"
1125
1126
static void *whpx_cpu_thread_fn(void *arg)
1127
{
1128
@@ -XXX,XX +XXX,XX @@ static void whpx_kick_vcpu_thread(CPUState *cpu)
1129
}
1130
}
1131
1132
-const CpusAccel whpx_cpus = {
1133
- .create_vcpu_thread = whpx_start_vcpu_thread,
1134
- .kick_vcpu_thread = whpx_kick_vcpu_thread,
1135
+static void whpx_accel_ops_class_init(ObjectClass *oc, void *data)
1136
+{
1137
+ AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
1138
1139
- .synchronize_post_reset = whpx_cpu_synchronize_post_reset,
1140
- .synchronize_post_init = whpx_cpu_synchronize_post_init,
1141
- .synchronize_state = whpx_cpu_synchronize_state,
1142
- .synchronize_pre_loadvm = whpx_cpu_synchronize_pre_loadvm,
1143
+ ops->create_vcpu_thread = whpx_start_vcpu_thread;
1144
+ ops->kick_vcpu_thread = whpx_kick_vcpu_thread;
1145
+
1146
+ ops->synchronize_post_reset = whpx_cpu_synchronize_post_reset;
1147
+ ops->synchronize_post_init = whpx_cpu_synchronize_post_init;
1148
+ ops->synchronize_state = whpx_cpu_synchronize_state;
1149
+ ops->synchronize_pre_loadvm = whpx_cpu_synchronize_pre_loadvm;
1150
+}
1151
+
1152
+static const TypeInfo whpx_accel_ops_type = {
1153
+ .name = ACCEL_OPS_NAME("whpx"),
1154
+
1155
+ .parent = TYPE_ACCEL_OPS,
1156
+ .class_init = whpx_accel_ops_class_init,
1157
+ .abstract = true,
1158
};
1159
+
1160
+static void whpx_accel_ops_register_types(void)
1161
+{
1162
+ type_register_static(&whpx_accel_ops_type);
1163
+}
1164
+type_init(whpx_accel_ops_register_types);
1165
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
1166
index XXXXXXX..XXXXXXX 100644
1167
--- a/target/i386/whpx/whpx-all.c
1168
+++ b/target/i386/whpx/whpx-all.c
1169
@@ -XXX,XX +XXX,XX @@
1170
#include "migration/blocker.h"
1171
#include <winerror.h>
1172
1173
-#include "whpx-cpus.h"
1174
#include "whpx-internal.h"
1175
+#include "whpx-accel-ops.h"
1176
+
1177
+#include <WinHvPlatform.h>
1178
+#include <WinHvEmulation.h>
1179
1180
#define HYPERV_APIC_BUS_FREQUENCY (200000000ULL)
1181
1182
@@ -XXX,XX +XXX,XX @@ static int whpx_accel_init(MachineState *ms)
1183
1184
whpx_memory_init();
1185
1186
- cpus_register_accel(&whpx_cpus);
1187
-
1188
printf("Windows Hypervisor Platform accelerator is operational\n");
1189
return 0;
1190
1191
diff --git a/MAINTAINERS b/MAINTAINERS
1192
index XXXXXXX..XXXXXXX 100644
1193
--- a/MAINTAINERS
1194
+++ b/MAINTAINERS
1195
@@ -XXX,XX +XXX,XX @@ M: Richard Henderson <richard.henderson@linaro.org>
1196
R: Paolo Bonzini <pbonzini@redhat.com>
1197
S: Maintained
1198
F: include/qemu/accel.h
1199
-F: accel/accel.c
1200
+F: include/sysemu/accel-ops.h
1201
+F: accel/accel-*.c
1202
F: accel/Makefile.objs
1203
F: accel/stubs/Makefile.objs
1204
1205
diff --git a/accel/kvm/meson.build b/accel/kvm/meson.build
1206
index XXXXXXX..XXXXXXX 100644
1207
--- a/accel/kvm/meson.build
1208
+++ b/accel/kvm/meson.build
1209
@@ -XXX,XX +XXX,XX @@
1210
kvm_ss = ss.source_set()
1211
kvm_ss.add(files(
1212
'kvm-all.c',
1213
- 'kvm-cpus.c',
1214
+ 'kvm-accel-ops.c',
1215
))
1216
kvm_ss.add(when: 'CONFIG_SEV', if_false: files('sev-stub.c'))
1217
1218
diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build
1219
index XXXXXXX..XXXXXXX 100644
1220
--- a/accel/tcg/meson.build
1221
+++ b/accel/tcg/meson.build
1222
@@ -XXX,XX +XXX,XX @@ specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
1223
1224
specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
1225
'cputlb.c',
1226
- 'tcg-cpus.c',
1227
- 'tcg-cpus-mttcg.c',
1228
- 'tcg-cpus-icount.c',
1229
- 'tcg-cpus-rr.c'
1230
+ 'tcg-accel-ops.c',
1231
+ 'tcg-accel-ops-mttcg.c',
1232
+ 'tcg-accel-ops-icount.c',
1233
+ 'tcg-accel-ops-rr.c'
1234
))
1235
diff --git a/target/i386/hax/meson.build b/target/i386/hax/meson.build
1236
index XXXXXXX..XXXXXXX 100644
1237
--- a/target/i386/hax/meson.build
1238
+++ b/target/i386/hax/meson.build
1239
@@ -XXX,XX +XXX,XX @@
1240
i386_softmmu_ss.add(when: 'CONFIG_HAX', if_true: files(
1241
'hax-all.c',
1242
'hax-mem.c',
1243
- 'hax-cpus.c',
1244
+ 'hax-accel-ops.c',
1245
))
1246
i386_softmmu_ss.add(when: ['CONFIG_HAX', 'CONFIG_POSIX'], if_true: files('hax-posix.c'))
1247
i386_softmmu_ss.add(when: ['CONFIG_HAX', 'CONFIG_WIN32'], if_true: files('hax-windows.c'))
1248
diff --git a/target/i386/hvf/meson.build b/target/i386/hvf/meson.build
1249
index XXXXXXX..XXXXXXX 100644
1250
--- a/target/i386/hvf/meson.build
1251
+++ b/target/i386/hvf/meson.build
1252
@@ -XXX,XX +XXX,XX @@
1253
i386_softmmu_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files(
1254
'hvf.c',
1255
- 'hvf-cpus.c',
1256
+ 'hvf-accel-ops.c',
1257
'x86.c',
1258
'x86_cpuid.c',
1259
'x86_decode.c',
1260
diff --git a/target/i386/whpx/meson.build b/target/i386/whpx/meson.build
1261
index XXXXXXX..XXXXXXX 100644
1262
--- a/target/i386/whpx/meson.build
1263
+++ b/target/i386/whpx/meson.build
1264
@@ -XXX,XX +XXX,XX @@
1265
i386_softmmu_ss.add(when: 'CONFIG_WHPX', if_true: files(
1266
'whpx-all.c',
1267
'whpx-apic.c',
1268
- 'whpx-cpus.c',
1269
+ 'whpx-accel-ops.c',
1270
))
1271
--
2.25.1

Introduce ti_is_const, ti_const_val, ti_is_const_val.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static inline TempOptInfo *arg_info(TCGArg arg)
     return ts_info(arg_temp(arg));
 }
 
+static inline bool ti_is_const(TempOptInfo *ti)
+{
+    return ti->is_const;
+}
+
+static inline uint64_t ti_const_val(TempOptInfo *ti)
+{
+    return ti->val;
+}
+
+static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
+{
+    return ti_is_const(ti) && ti_const_val(ti) == val;
+}
+
 static inline bool ts_is_const(TCGTemp *ts)
 {
-    return ts_info(ts)->is_const;
+    return ti_is_const(ts_info(ts));
 }
 
 static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
 {
-    TempOptInfo *ti = ts_info(ts);
-    return ti->is_const && ti->val == val;
+    return ti_is_const_val(ts_info(ts), val);
 }
 
 static inline bool arg_is_const(TCGArg arg)
--
2.43.0
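
A usage sketch of the new accessors (illustrative only; the fold_*
conversions in the following patches are the real call sites):

    TempOptInfo *t2 = arg_info(op->args[2]);

    if (ti_is_const_val(t2, 0)) {
        /* op->args[2] is known to be the constant 0. */
    } else if (ti_is_const(t2)) {
        /* ti_const_val() is only meaningful after ti_is_const(). */
        uint64_t v2 = ti_const_val(t2);
    }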
New patch
Avoid the use of the OptContext slots. Find TempOptInfo once.
Sink mask computation below fold_affected_mask early exit.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_add2(OptContext *ctx, TCGOp *op)
 
 static bool fold_and(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z1, z2;
+    uint64_t z1, z2, z_mask, s_mask;
+    TempOptInfo *t1, *t2;
 
     if (fold_const2_commutative(ctx, op) ||
         fold_xi_to_i(ctx, op, 0) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
         return true;
     }
 
-    z1 = arg_info(op->args[1])->z_mask;
-    z2 = arg_info(op->args[2])->z_mask;
-    ctx->z_mask = z1 & z2;
-
-    /*
-     * Sign repetitions are perforce all identical, whether they are 1 or 0.
-     * Bitwise operations preserve the relative quantity of the repetitions.
-     */
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
+    t1 = arg_info(op->args[1]);
+    t2 = arg_info(op->args[2]);
+    z1 = t1->z_mask;
+    z2 = t2->z_mask;
 
     /*
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer affected bits from it.
      */
-    if (arg_is_const(op->args[2]) &&
-        fold_affected_mask(ctx, op, z1 & ~z2)) {
+    if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
         return true;
     }
 
-    return fold_masks(ctx, op);
+    z_mask = z1 & z2;
+
+    /*
+     * Sign repetitions are perforce all identical, whether they are 1 or 0.
+     * Bitwise operations preserve the relative quantity of the repetitions.
+     */
+    s_mask = t1->s_mask & t2->s_mask;
+
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_andc(OptContext *ctx, TCGOp *op)
--
2.43.0
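
(A worked example of the mask algebra above, with invented values: if
z1 = 0x00ff and z2 = 0x0ff0, only bits possibly nonzero in both inputs
can be nonzero in the AND, so z_mask = z1 & z2 = 0x00f0.  The affected
bits are z1 & ~z2 = 0x000f: the bits of arg1 that the AND can actually
clear.  Were that mask zero, fold_affected_mask() could replace the
whole op with a plain copy of arg1.)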
New patch
Avoid the use of the OptContext slots. Find TempOptInfo once.
Avoid double inversion of the value of second const operand.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_and(OptContext *ctx, TCGOp *op)
 
 static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z1;
+    uint64_t z_mask, s_mask;
+    TempOptInfo *t1, *t2;
 
     if (fold_const2(ctx, op) ||
         fold_xx_to_i(ctx, op, 0) ||
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
         return true;
     }
 
-    z1 = arg_info(op->args[1])->z_mask;
+    t1 = arg_info(op->args[1]);
+    t2 = arg_info(op->args[2]);
+    z_mask = t1->z_mask;
 
     /*
      * Known-zeros does not imply known-ones.  Therefore unless
      * arg2 is constant, we can't infer anything from it.
      */
-    if (arg_is_const(op->args[2])) {
-        uint64_t z2 = ~arg_info(op->args[2])->z_mask;
-        if (fold_affected_mask(ctx, op, z1 & ~z2)) {
+    if (ti_is_const(t2)) {
+        uint64_t v2 = ti_const_val(t2);
+        if (fold_affected_mask(ctx, op, z_mask & v2)) {
             return true;
         }
-        z1 &= z2;
+        z_mask &= ~v2;
     }
-    ctx->z_mask = z1;
 
-    ctx->s_mask = arg_info(op->args[1])->s_mask
-                & arg_info(op->args[2])->s_mask;
-    return fold_masks(ctx, op);
+    s_mask = t1->s_mask & t2->s_mask;
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_brcond(OptContext *ctx, TCGOp *op)
--
2.43.0
New patch
Avoid the use of the OptContext slots. Find TempOptInfo once.
Always set s_mask along the BSWAP_OS path, since the result is
being explicitly sign-extended.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
 static bool fold_bswap(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask, s_mask, sign;
+    TempOptInfo *t1 = arg_info(op->args[1]);
 
-    if (arg_is_const(op->args[1])) {
-        uint64_t t = arg_info(op->args[1])->val;
-
-        t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
-        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    if (ti_is_const(t1)) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0],
+                                do_constant_folding(op->opc, ctx->type,
+                                                    ti_const_val(t1),
+                                                    op->args[2]));
     }
 
-    z_mask = arg_info(op->args[1])->z_mask;
-
+    z_mask = t1->z_mask;
     switch (op->opc) {
     case INDEX_op_bswap16_i32:
     case INDEX_op_bswap16_i64:
@@ -XXX,XX +XXX,XX @@ static bool fold_bswap(OptContext *ctx, TCGOp *op)
         /* If the sign bit may be 1, force all the bits above to 1. */
         if (z_mask & sign) {
             z_mask |= sign;
-            s_mask = sign << 1;
         }
+        /* The value and therefore s_mask is explicitly sign-extended. */
+        s_mask = sign;
         break;
     default:
         /* The high bits are undefined: force all bits above the sign to 1. */
         z_mask |= sign << 1;
         break;
     }
-    ctx->z_mask = z_mask;
-    ctx->s_mask = s_mask;
 
-    return fold_masks(ctx, op);
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_call(OptContext *ctx, TCGOp *op)
--
2.43.0
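
(Illustration for the BSWAP_OS case above, values invented: for a
bswap16 whose result is sign-extended, sign = 0x8000.  After the
explicit extension every bit above bit 15 is a copy of bit 15, so the
sign-repetition run starts at the sign bit itself and s_mask can be
set to sign rather than sign << 1.)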
New patch
Avoid the use of the OptContext slots. Find TempOptInfo once.
Compute s_mask from the union of the maximum count and the
op2 fallback for op1 being zero.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
 
 static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
 {
-    uint64_t z_mask;
+    uint64_t z_mask, s_mask;
+    TempOptInfo *t1 = arg_info(op->args[1]);
+    TempOptInfo *t2 = arg_info(op->args[2]);
 
-    if (arg_is_const(op->args[1])) {
-        uint64_t t = arg_info(op->args[1])->val;
+    if (ti_is_const(t1)) {
+        uint64_t t = ti_const_val(t1);
 
         if (t != 0) {
             t = do_constant_folding(op->opc, ctx->type, t, 0);
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
     default:
         g_assert_not_reached();
     }
-    ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
-    return false;
+    s_mask = ~z_mask;
+    z_mask |= t2->z_mask;
+    s_mask &= t2->s_mask;
+
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_ctpop(OptContext *ctx, TCGOp *op)
--
2.43.0
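
(Worked example for the masks above: a 64-bit clz/ctz count lies in
[0, 64], so the elided switch presumably leaves z_mask = 64 | 63 = 127,
covering every bit such a count can occupy.  All bits above that,
~z_mask, are known zero and hence known equal to the zero sign bit,
which is why s_mask starts as ~z_mask before being intersected with
t2->s_mask for the op1 == 0 fallback.)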
New patch
Add fold_masks_z as a trivial wrapper around fold_masks_zs.
Avoid the use of the OptContext slots.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
     return true;
 }
 
+static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
+{
+    return fold_masks_zs(ctx, op, z_mask, 0);
+}
+
 static bool fold_masks(OptContext *ctx, TCGOp *op)
 {
     return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
@@ -XXX,XX +XXX,XX @@ static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
 
 static bool fold_ctpop(OptContext *ctx, TCGOp *op)
 {
+    uint64_t z_mask;
+
     if (fold_const1(ctx, op)) {
         return true;
     }
 
     switch (ctx->type) {
     case TCG_TYPE_I32:
-        ctx->z_mask = 32 | 31;
+        z_mask = 32 | 31;
         break;
     case TCG_TYPE_I64:
-        ctx->z_mask = 64 | 63;
+        z_mask = 64 | 63;
         break;
     default:
         g_assert_not_reached();
     }
-    return false;
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_deposit(OptContext *ctx, TCGOp *op)
--
2.43.0
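
(Arithmetic check for the constants above: a 32-bit population count
lies in [0, 32], and 32 | 31 = 63 = 0b111111, exactly the low six bits
such a value can occupy; likewise 64 | 63 = 127 for the 64-bit case.)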
New patch
Avoid the use of the OptContext slots. Find TempOptInfo once.
When we fold to and, use fold_and.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 35 +++++++++++++++++------------------
 1 file changed, 17 insertions(+), 18 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_ctpop(OptContext *ctx, TCGOp *op)
 
 static bool fold_deposit(OptContext *ctx, TCGOp *op)
 {
+    TempOptInfo *t1 = arg_info(op->args[1]);
+    TempOptInfo *t2 = arg_info(op->args[2]);
+    int ofs = op->args[3];
+    int len = op->args[4];
     TCGOpcode and_opc;
+    uint64_t z_mask;
 
-    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-        uint64_t t1 = arg_info(op->args[1])->val;
-        uint64_t t2 = arg_info(op->args[2])->val;
-
-        t1 = deposit64(t1, op->args[3], op->args[4], t2);
-        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
+    if (ti_is_const(t1) && ti_is_const(t2)) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0],
+                                deposit64(ti_const_val(t1), ofs, len,
+                                          ti_const_val(t2)));
     }
 
     switch (ctx->type) {
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
     }
 
     /* Inserting a value into zero at offset 0. */
-    if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
-        uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
+    if (ti_is_const_val(t1, 0) && ofs == 0) {
+        uint64_t mask = MAKE_64BIT_MASK(0, len);
 
         op->opc = and_opc;
         op->args[1] = op->args[2];
         op->args[2] = arg_new_constant(ctx, mask);
-        ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
-        return false;
+        return fold_and(ctx, op);
     }
 
     /* Inserting zero into a value. */
-    if (arg_is_const_val(op->args[2], 0)) {
-        uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
+    if (ti_is_const_val(t2, 0)) {
+        uint64_t mask = deposit64(-1, ofs, len, 0);
 
         op->opc = and_opc;
         op->args[2] = arg_new_constant(ctx, mask);
-        ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
-        return false;
+        return fold_and(ctx, op);
     }
 
-    ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
-                            op->args[3], op->args[4],
-                            arg_info(op->args[2])->z_mask);
-    return false;
+    z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
+    return fold_masks_z(ctx, op, z_mask);
 }
 
 static bool fold_divide(OptContext *ctx, TCGOp *op)
--
2.43.0
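
(Worked example, values invented: deposit64(t1->z_mask, ofs, len,
t2->z_mask) with t1->z_mask = 0xffff, ofs = 8, len = 8 and
t2->z_mask = 0x0f replaces bits 8..15 of 0xffff with 0x0f, giving
z_mask = 0x0fff: the inserted field contributes only its own
possibly-nonzero bits.)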
New patch
The input which overlaps the sign bit of the output can
have its input s_mask propagated to the output s_mask.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
     TempOptInfo *t2 = arg_info(op->args[2]);
     int ofs = op->args[3];
     int len = op->args[4];
+    int width;
     TCGOpcode and_opc;
-    uint64_t z_mask;
+    uint64_t z_mask, s_mask;
 
     if (ti_is_const(t1) && ti_is_const(t2)) {
         return tcg_opt_gen_movi(ctx, op, op->args[0],
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
     switch (ctx->type) {
     case TCG_TYPE_I32:
         and_opc = INDEX_op_and_i32;
+        width = 32;
         break;
     case TCG_TYPE_I64:
         and_opc = INDEX_op_and_i64;
+        width = 64;
         break;
     default:
         g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
         return fold_and(ctx, op);
     }
 
+    /* The s_mask from the top portion of the deposit is still valid. */
+    if (ofs + len == width) {
+        s_mask = t2->s_mask << ofs;
+    } else {
+        s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
+    }
+
     z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
-    return fold_masks_z(ctx, op, z_mask);
+    return fold_masks_zs(ctx, op, z_mask, s_mask);
 }
 
 static bool fold_divide(OptContext *ctx, TCGOp *op)
--
2.43.0
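
(Illustration for the s_mask propagation above: with a 64-bit deposit
at ofs = 32, len = 32, the inserted field ends exactly at the sign bit
(ofs + len == width), so t2's sign-repetition run simply moves up by
ofs and s_mask = t2->s_mask << 32.  Otherwise the top of t1 survives
above the field, and only the part of t1->s_mask above ofs + len
remains valid.)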
New patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_divide(OptContext *ctx, TCGOp *op)
         fold_xi_to_x(ctx, op, 1)) {
         return true;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_dup(OptContext *ctx, TCGOp *op)
--
2.43.0
New patch
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_dup(OptContext *ctx, TCGOp *op)
         t = dup_const(TCGOP_VECE(op), t);
         return tcg_opt_gen_movi(ctx, op, op->args[0], t);
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_dup2(OptContext *ctx, TCGOp *op)
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
         op->opc = INDEX_op_dup_vec;
         TCGOP_VECE(op) = MO_32;
     }
-    return false;
+    return finish_folding(ctx, op);
 }
 
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
--
2.43.0
1
The existing check was incomplete:
1
Add fold_masks_s as a trivial wrapper around fold_masks_zs.
2
(1) Only applied to two of the 7 stores, and not to the loads at all.
2
Avoid the use of the OptContext slots.
3
(2) Only checked the upper, but not the lower bound of the stack.
4
3
5
Doing this at compile time means that we don't need to do it
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
at runtime as well.
7
8
Tested-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
6
---
12
tcg/tci.c | 2 --
7
tcg/optimize.c | 13 ++++++++++---
13
tcg/tci/tcg-target.c.inc | 13 +++++++++++++
8
1 file changed, 10 insertions(+), 3 deletions(-)
14
2 files changed, 13 insertions(+), 2 deletions(-)
15
9
16
diff --git a/tcg/tci.c b/tcg/tci.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
17
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
18
--- a/tcg/tci.c
12
--- a/tcg/optimize.c
19
+++ b/tcg/tci.c
13
+++ b/tcg/optimize.c
20
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
21
t0 = tci_read_r32(regs, &tb_ptr);
15
return fold_masks_zs(ctx, op, z_mask, 0);
22
t1 = tci_read_r(regs, &tb_ptr);
23
t2 = tci_read_s32(&tb_ptr);
24
- tci_assert(t1 != sp_value || (int32_t)t2 < 0);
25
*(uint32_t *)(t1 + t2) = t0;
26
break;
27
28
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
29
t0 = tci_read_r64(regs, &tb_ptr);
30
t1 = tci_read_r(regs, &tb_ptr);
31
t2 = tci_read_s32(&tb_ptr);
32
- tci_assert(t1 != sp_value || (int32_t)t2 < 0);
33
*(uint64_t *)(t1 + t2) = t0;
34
break;
35
36
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
37
index XXXXXXX..XXXXXXX 100644
38
--- a/tcg/tci/tcg-target.c.inc
39
+++ b/tcg/tci/tcg-target.c.inc
40
@@ -XXX,XX +XXX,XX @@ static void tci_out_label(TCGContext *s, TCGLabel *label)
41
}
42
}
16
}
43
17
44
+static void stack_bounds_check(TCGReg base, target_long offset)
18
+static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
45
+{
19
+{
46
+ if (base == TCG_REG_CALL_STACK) {
20
+ return fold_masks_zs(ctx, op, -1, s_mask);
47
+ tcg_debug_assert(offset < 0);
48
+ tcg_debug_assert(offset >= -(CPU_TEMP_BUF_NLONGS * sizeof(long)));
49
+ }
50
+}
21
+}
51
+
22
+
52
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
23
static bool fold_masks(OptContext *ctx, TCGOp *op)
53
intptr_t arg2)
54
{
24
{
55
uint8_t *old_code_ptr = s->code_ptr;
25
return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
26
@@ -XXX,XX +XXX,XX @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
27
28
static bool fold_eqv(OptContext *ctx, TCGOp *op)
29
{
30
+ uint64_t s_mask;
56
+
31
+
57
+ stack_bounds_check(arg1, arg2);
32
if (fold_const2_commutative(ctx, op) ||
58
if (type == TCG_TYPE_I32) {
33
fold_xi_to_x(ctx, op, -1) ||
59
tcg_out_op_t(s, INDEX_op_ld_i32);
34
fold_xi_to_not(ctx, op, 0)) {
60
tcg_out_r(s, ret);
35
return true;
61
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
36
}
62
case INDEX_op_st16_i64:
37
63
case INDEX_op_st32_i64:
38
- ctx->s_mask = arg_info(op->args[1])->s_mask
64
case INDEX_op_st_i64:
39
- & arg_info(op->args[2])->s_mask;
65
+ stack_bounds_check(args[1], args[2]);
40
- return false;
66
tcg_out_r(s, args[0]);
41
+ s_mask = arg_info(op->args[1])->s_mask
67
tcg_out_r(s, args[1]);
42
+ & arg_info(op->args[2])->s_mask;
68
tcg_debug_assert(args[2] == (int32_t)args[2]);
43
+ return fold_masks_s(ctx, op, s_mask);
69
@@ -XXX,XX +XXX,XX @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
44
}
70
intptr_t arg2)
45
71
{
46
static bool fold_extract(OptContext *ctx, TCGOp *op)
72
uint8_t *old_code_ptr = s->code_ptr;
73
+
74
+ stack_bounds_check(arg1, arg2);
75
if (type == TCG_TYPE_I32) {
76
tcg_out_op_t(s, INDEX_op_st_i32);
77
tcg_out_r(s, arg);
78
--
47
--
79
2.25.1
48
2.43.0
80
81
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 15 ++++++---------
7
1 file changed, 6 insertions(+), 9 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
14
static bool fold_extract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask_old, z_mask;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = extract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ extract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask_old = arg_info(op->args[1])->z_mask;
33
+ z_mask_old = t1->z_mask;
34
z_mask = extract64(z_mask_old, pos, len);
35
if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
36
return true;
37
}
38
- ctx->z_mask = z_mask;
39
40
- return fold_masks(ctx, op);
41
+ return fold_masks_z(ctx, op, z_mask);
42
}
43
44
static bool fold_extract2(OptContext *ctx, TCGOp *op)
45
--
46
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
12
}
13
return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_exts(OptContext *ctx, TCGOp *op)
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Explicitly sign-extend z_mask instead of doing that manually.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 29 ++++++++++++-----------------
8
1 file changed, 12 insertions(+), 17 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
15
16
static bool fold_exts(OptContext *ctx, TCGOp *op)
17
{
18
- uint64_t s_mask_old, s_mask, z_mask, sign;
19
+ uint64_t s_mask_old, s_mask, z_mask;
20
bool type_change = false;
21
+ TempOptInfo *t1;
22
23
if (fold_const1(ctx, op)) {
24
return true;
25
}
26
27
- z_mask = arg_info(op->args[1])->z_mask;
28
- s_mask = arg_info(op->args[1])->s_mask;
29
+ t1 = arg_info(op->args[1]);
30
+ z_mask = t1->z_mask;
31
+ s_mask = t1->s_mask;
32
s_mask_old = s_mask;
33
34
switch (op->opc) {
35
CASE_OP_32_64(ext8s):
36
- sign = INT8_MIN;
37
- z_mask = (uint8_t)z_mask;
38
+ s_mask |= INT8_MIN;
39
+ z_mask = (int8_t)z_mask;
40
break;
41
CASE_OP_32_64(ext16s):
42
- sign = INT16_MIN;
43
- z_mask = (uint16_t)z_mask;
44
+ s_mask |= INT16_MIN;
45
+ z_mask = (int16_t)z_mask;
46
break;
47
case INDEX_op_ext_i32_i64:
48
type_change = true;
49
QEMU_FALLTHROUGH;
50
case INDEX_op_ext32s_i64:
51
- sign = INT32_MIN;
52
- z_mask = (uint32_t)z_mask;
53
+ s_mask |= INT32_MIN;
54
+ z_mask = (int32_t)z_mask;
55
break;
56
default:
57
g_assert_not_reached();
58
}
59
60
- if (z_mask & sign) {
61
- z_mask |= sign;
62
- }
63
- s_mask |= sign << 1;
64
-
65
- ctx->z_mask = z_mask;
66
- ctx->s_mask = s_mask;
67
if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
68
return true;
69
}
70
71
- return fold_masks(ctx, op);
72
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
73
}
74
75
static bool fold_extu(OptContext *ctx, TCGOp *op)
76
--
77
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 4 ++--
7
1 file changed, 2 insertions(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_extu(OptContext *ctx, TCGOp *op)
14
g_assert_not_reached();
15
}
16
17
- ctx->z_mask = z_mask;
18
if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
19
return true;
20
}
21
- return fold_masks(ctx, op);
22
+
23
+ return fold_masks_z(ctx, op, z_mask);
24
}
25
26
static bool fold_mb(OptContext *ctx, TCGOp *op)
27
--
28
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 19 +++++++++++--------
7
1 file changed, 11 insertions(+), 8 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
14
15
static bool fold_movcond(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *tt, *ft;
19
int i;
20
21
/* If true and false values are the same, eliminate the cmp. */
22
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
23
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
24
}
25
26
- ctx->z_mask = arg_info(op->args[3])->z_mask
27
- | arg_info(op->args[4])->z_mask;
28
- ctx->s_mask = arg_info(op->args[3])->s_mask
29
- & arg_info(op->args[4])->s_mask;
30
+ tt = arg_info(op->args[3]);
31
+ ft = arg_info(op->args[4]);
32
+ z_mask = tt->z_mask | ft->z_mask;
33
+ s_mask = tt->s_mask & ft->s_mask;
34
35
- if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
36
- uint64_t tv = arg_info(op->args[3])->val;
37
- uint64_t fv = arg_info(op->args[4])->val;
38
+ if (ti_is_const(tt) && ti_is_const(ft)) {
39
+ uint64_t tv = ti_const_val(tt);
40
+ uint64_t fv = ti_const_val(ft);
41
TCGOpcode opc, negopc = 0;
42
TCGCond cond = op->args[5];
43
44
@@ -XXX,XX +XXX,XX @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
45
}
46
}
47
}
48
- return false;
49
+
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
51
}
52
53
static bool fold_mul(OptContext *ctx, TCGOp *op)
54
--
55
2.43.0
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 6 +++---
5
1 file changed, 3 insertions(+), 3 deletions(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_mul(OptContext *ctx, TCGOp *op)
12
fold_xi_to_x(ctx, op, 1)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
20
@@ -XXX,XX +XXX,XX @@ static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
21
fold_xi_to_i(ctx, op, 0)) {
22
return true;
23
}
24
- return false;
25
+ return finish_folding(ctx, op);
26
}
27
28
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
29
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
30
tcg_opt_gen_movi(ctx, op2, rh, h);
31
return true;
32
}
33
- return false;
34
+ return finish_folding(ctx, op);
35
}
36
37
static bool fold_nand(OptContext *ctx, TCGOp *op)
38
--
39
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_multiply2(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nand(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, -1)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
diff view generated by jsdifflib
1
Tested-by: Alex Bennée <alex.bennee@linaro.org>
1
Avoid the use of the OptContext slots.
2
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
---
5
tcg/tci.c | 9 ++-------
6
tcg/optimize.c | 9 ++-------
6
1 file changed, 2 insertions(+), 7 deletions(-)
7
1 file changed, 2 insertions(+), 7 deletions(-)
7
8
8
diff --git a/tcg/tci.c b/tcg/tci.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/tci.c
11
--- a/tcg/optimize.c
11
+++ b/tcg/tci.c
12
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
13
regs[index] = value;
14
{
15
/* Set to 1 all bits to the left of the rightmost. */
16
uint64_t z_mask = arg_info(op->args[1])->z_mask;
17
- ctx->z_mask = -(z_mask & -z_mask);
18
+ z_mask = -(z_mask & -z_mask);
19
20
- /*
21
- * Because of fold_sub_to_neg, we want to always return true,
22
- * via finish_folding.
23
- */
24
- finish_folding(ctx, op);
25
- return true;
26
+ return fold_masks_z(ctx, op, z_mask);
14
}
27
}
15
28
16
-static void tci_write_reg8(tcg_target_ulong *regs, TCGReg index, uint8_t value)
29
static bool fold_neg(OptContext *ctx, TCGOp *op)
17
-{
18
- tci_write_reg(regs, index, value);
19
-}
20
-
21
#if TCG_TARGET_REG_BITS == 64
22
static void
23
tci_write_reg16(tcg_target_ulong *regs, TCGReg index, uint16_t value)
24
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
25
t0 = *tb_ptr++;
26
t1 = tci_read_r(regs, &tb_ptr);
27
t2 = tci_read_s32(&tb_ptr);
28
- tci_write_reg8(regs, t0, *(uint8_t *)(t1 + t2));
29
+ tci_write_reg(regs, t0, *(uint8_t *)(t1 + t2));
30
break;
31
case INDEX_op_ld8s_i32:
32
TODO();
33
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
34
t0 = *tb_ptr++;
35
t1 = tci_read_r(regs, &tb_ptr);
36
t2 = tci_read_s32(&tb_ptr);
37
- tci_write_reg8(regs, t0, *(uint8_t *)(t1 + t2));
38
+ tci_write_reg(regs, t0, *(uint8_t *)(t1 + t2));
39
break;
40
case INDEX_op_ld8s_i64:
41
t0 = *tb_ptr++;
42
--
30
--
43
2.25.1
31
2.43.0
44
45
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
14
15
static bool fold_nor(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2_commutative(ctx, op) ||
20
fold_xi_to_not(ctx, op, 0)) {
21
return true;
22
}
23
24
- ctx->s_mask = arg_info(op->args[1])->s_mask
25
- & arg_info(op->args[2])->s_mask;
26
- return false;
27
+ s_mask = arg_info(op->args[1])->s_mask
28
+ & arg_info(op->args[2])->s_mask;
29
+ return fold_masks_s(ctx, op, s_mask);
30
}
31
32
static bool fold_not(OptContext *ctx, TCGOp *op)
33
--
34
2.43.0
diff view generated by jsdifflib
1
Tested-by: Alex Bennée <alex.bennee@linaro.org>
1
Avoid the use of the OptContext slots.
2
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
---
5
tcg/tci.c | 7 +------
6
tcg/optimize.c | 7 +------
6
1 file changed, 1 insertion(+), 6 deletions(-)
7
1 file changed, 1 insertion(+), 6 deletions(-)
7
8
8
diff --git a/tcg/tci.c b/tcg/tci.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/tci.c
11
--- a/tcg/optimize.c
11
+++ b/tcg/tci.c
12
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
13
*(uint16_t *)(t1 + t2) = t0;
14
if (fold_const1(ctx, op)) {
14
break;
15
return true;
15
case INDEX_op_st_i32:
16
}
16
+ CASE_64(st32)
17
-
17
t0 = tci_read_r32(regs, &tb_ptr);
18
- ctx->s_mask = arg_info(op->args[1])->s_mask;
18
t1 = tci_read_r(regs, &tb_ptr);
19
-
19
t2 = tci_read_s32(&tb_ptr);
20
- /* Because of fold_to_not, we want to always return true, via finish. */
20
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
21
- finish_folding(ctx, op);
21
t2 = tci_read_s32(&tb_ptr);
22
- return true;
22
tci_write_reg(regs, t0, *(uint64_t *)(t1 + t2));
23
+ return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
23
break;
24
}
24
- case INDEX_op_st32_i64:
25
25
- t0 = tci_read_r32(regs, &tb_ptr);
26
static bool fold_or(OptContext *ctx, TCGOp *op)
26
- t1 = tci_read_r(regs, &tb_ptr);
27
- t2 = tci_read_s32(&tb_ptr);
28
- *(uint32_t *)(t1 + t2) = t0;
29
- break;
30
case INDEX_op_st_i64:
31
t0 = tci_read_r64(regs, &tb_ptr);
32
t1 = tci_read_r(regs, &tb_ptr);
33
--
27
--
34
2.25.1
28
2.43.0
35
36
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 13 ++++++++-----
7
1 file changed, 8 insertions(+), 5 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_not(OptContext *ctx, TCGOp *op)
14
15
static bool fold_or(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t z_mask, s_mask;
18
+ TempOptInfo *t1, *t2;
19
+
20
if (fold_const2_commutative(ctx, op) ||
21
fold_xi_to_x(ctx, op, 0) ||
22
fold_xx_to_x(ctx, op)) {
23
return true;
24
}
25
26
- ctx->z_mask = arg_info(op->args[1])->z_mask
27
- | arg_info(op->args[2])->z_mask;
28
- ctx->s_mask = arg_info(op->args[1])->s_mask
29
- & arg_info(op->args[2])->s_mask;
30
- return fold_masks(ctx, op);
31
+ t1 = arg_info(op->args[1]);
32
+ t2 = arg_info(op->args[2]);
33
+ z_mask = t1->z_mask | t2->z_mask;
34
+ s_mask = t1->s_mask & t2->s_mask;
35
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
36
}
37
38
static bool fold_orc(OptContext *ctx, TCGOp *op)
39
--
40
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 8 +++++---
7
1 file changed, 5 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_or(OptContext *ctx, TCGOp *op)
14
15
static bool fold_orc(OptContext *ctx, TCGOp *op)
16
{
17
+ uint64_t s_mask;
18
+
19
if (fold_const2(ctx, op) ||
20
fold_xx_to_i(ctx, op, -1) ||
21
fold_xi_to_x(ctx, op, -1) ||
22
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
23
return true;
24
}
25
26
- ctx->s_mask = arg_info(op->args[1])->s_mask
27
- & arg_info(op->args[2])->s_mask;
28
- return false;
29
+ s_mask = arg_info(op->args[1])->s_mask
30
+ & arg_info(op->args[2])->s_mask;
31
+ return fold_masks_s(ctx, op, s_mask);
32
}
33
34
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
35
--
36
2.43.0
diff view generated by jsdifflib
1
From: Eduardo Habkost <ehabkost@redhat.com>
1
Avoid the use of the OptContext slots.
2
2
3
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
3
Be careful not to call fold_masks_zs when the memory operation
4
Signed-off-by: Claudio Fontana <cfontana@suse.de>
4
is wide enough to require multiple outputs, so split into two
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
functions: fold_qemu_ld_1reg and fold_qemu_ld_2reg.
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
7
Message-Id: <20210204163931.7358-8-cfontana@suse.de>
7
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
9
---
10
include/hw/core/cpu.h | 4 ++--
10
tcg/optimize.c | 26 +++++++++++++++++++++-----
11
accel/tcg/cpu-exec.c | 4 ++--
11
1 file changed, 21 insertions(+), 5 deletions(-)
12
target/arm/cpu.c | 2 +-
13
target/i386/tcg/tcg-cpu.c | 2 +-
14
target/lm32/cpu.c | 2 +-
15
target/s390x/cpu.c | 2 +-
16
target/xtensa/cpu.c | 2 +-
17
7 files changed, 9 insertions(+), 9 deletions(-)
18
12
19
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
13
diff --git a/tcg/optimize.c b/tcg/optimize.c
20
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
21
--- a/include/hw/core/cpu.h
15
--- a/tcg/optimize.c
22
+++ b/include/hw/core/cpu.h
16
+++ b/tcg/optimize.c
23
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
17
@@ -XXX,XX +XXX,XX @@ static bool fold_orc(OptContext *ctx, TCGOp *op)
24
bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
18
return fold_masks_s(ctx, op, s_mask);
25
MMUAccessType access_type, int mmu_idx,
19
}
26
bool probe, uintptr_t retaddr);
20
27
+ /** @debug_excp_handler: Callback for handling debug exceptions */
21
-static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
28
+ void (*debug_excp_handler)(CPUState *cpu);
22
+static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
29
23
{
30
} TcgCpuOperations;
24
const TCGOpDef *def = &tcg_op_defs[op->opc];
31
25
MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
32
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
26
MemOp mop = get_memop(oi);
33
* @gdb_write_register: Callback for letting GDB write a register.
27
int width = 8 * memop_size(mop);
34
* @debug_check_watchpoint: Callback: return true if the architectural
28
+ uint64_t z_mask = -1, s_mask = 0;
35
* watchpoint whose address has matched should really fire.
29
36
- * @debug_excp_handler: Callback for handling debug exceptions.
30
if (width < 64) {
37
* @write_elf64_note: Callback for writing a CPU-specific ELF note to a
31
if (mop & MO_SIGN) {
38
* 64-bit VM coredump.
32
- ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
39
* @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
33
+ s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
40
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
34
} else {
41
int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
35
- ctx->z_mask = MAKE_64BIT_MASK(0, width);
42
int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
36
+ z_mask = MAKE_64BIT_MASK(0, width);
43
bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
44
- void (*debug_excp_handler)(CPUState *cpu);
45
46
int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
47
int cpuid, void *opaque);
48
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
49
index XXXXXXX..XXXXXXX 100644
50
--- a/accel/tcg/cpu-exec.c
51
+++ b/accel/tcg/cpu-exec.c
52
@@ -XXX,XX +XXX,XX @@ static inline void cpu_handle_debug_exception(CPUState *cpu)
53
}
37
}
54
}
38
}
55
39
56
- if (cc->debug_excp_handler) {
40
/* Opcodes that touch guest memory stop the mb optimization. */
57
- cc->debug_excp_handler(cpu);
41
ctx->prev_mb = NULL;
58
+ if (cc->tcg_ops.debug_excp_handler) {
42
- return false;
59
+ cc->tcg_ops.debug_excp_handler(cpu);
43
+
60
}
44
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
45
+}
46
+
47
+static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
48
+{
49
+ /* Opcodes that touch guest memory stop the mb optimization. */
50
+ ctx->prev_mb = NULL;
51
+ return finish_folding(ctx, op);
61
}
52
}
62
53
63
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
54
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
64
index XXXXXXX..XXXXXXX 100644
55
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
65
--- a/target/arm/cpu.c
56
break;
66
+++ b/target/arm/cpu.c
57
case INDEX_op_qemu_ld_a32_i32:
67
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
58
case INDEX_op_qemu_ld_a64_i32:
68
cc->tcg_ops.cpu_exec_interrupt = arm_cpu_exec_interrupt;
59
+ done = fold_qemu_ld_1reg(&ctx, op);
69
cc->tcg_ops.synchronize_from_tb = arm_cpu_synchronize_from_tb;
60
+ break;
70
cc->tcg_ops.tlb_fill = arm_cpu_tlb_fill;
61
case INDEX_op_qemu_ld_a32_i64:
71
- cc->debug_excp_handler = arm_debug_excp_handler;
62
case INDEX_op_qemu_ld_a64_i64:
72
+ cc->tcg_ops.debug_excp_handler = arm_debug_excp_handler;
63
+ if (TCG_TARGET_REG_BITS == 64) {
73
cc->debug_check_watchpoint = arm_debug_check_watchpoint;
64
+ done = fold_qemu_ld_1reg(&ctx, op);
74
cc->do_unaligned_access = arm_cpu_do_unaligned_access;
65
+ break;
75
#if !defined(CONFIG_USER_ONLY)
66
+ }
76
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
67
+ QEMU_FALLTHROUGH;
77
index XXXXXXX..XXXXXXX 100644
68
case INDEX_op_qemu_ld_a32_i128:
78
--- a/target/i386/tcg/tcg-cpu.c
69
case INDEX_op_qemu_ld_a64_i128:
79
+++ b/target/i386/tcg/tcg-cpu.c
70
- done = fold_qemu_ld(&ctx, op);
80
@@ -XXX,XX +XXX,XX @@ void tcg_cpu_common_class_init(CPUClass *cc)
71
+ done = fold_qemu_ld_2reg(&ctx, op);
81
cc->tcg_ops.initialize = tcg_x86_init;
72
break;
82
cc->tcg_ops.tlb_fill = x86_cpu_tlb_fill;
73
case INDEX_op_qemu_st8_a32_i32:
83
#ifndef CONFIG_USER_ONLY
74
case INDEX_op_qemu_st8_a64_i32:
84
- cc->debug_excp_handler = breakpoint_handler;
85
+ cc->tcg_ops.debug_excp_handler = breakpoint_handler;
86
#endif
87
}
88
diff --git a/target/lm32/cpu.c b/target/lm32/cpu.c
89
index XXXXXXX..XXXXXXX 100644
90
--- a/target/lm32/cpu.c
91
+++ b/target/lm32/cpu.c
92
@@ -XXX,XX +XXX,XX @@ static void lm32_cpu_class_init(ObjectClass *oc, void *data)
93
#endif
94
cc->gdb_num_core_regs = 32 + 7;
95
cc->gdb_stop_before_watchpoint = true;
96
- cc->debug_excp_handler = lm32_debug_excp_handler;
97
+ cc->tcg_ops.debug_excp_handler = lm32_debug_excp_handler;
98
cc->disas_set_info = lm32_cpu_disas_set_info;
99
cc->tcg_ops.initialize = lm32_translate_init;
100
}
101
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
102
index XXXXXXX..XXXXXXX 100644
103
--- a/target/s390x/cpu.c
104
+++ b/target/s390x/cpu.c
105
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
106
cc->write_elf64_note = s390_cpu_write_elf64_note;
107
#ifdef CONFIG_TCG
108
cc->tcg_ops.cpu_exec_interrupt = s390_cpu_exec_interrupt;
109
- cc->debug_excp_handler = s390x_cpu_debug_excp_handler;
110
+ cc->tcg_ops.debug_excp_handler = s390x_cpu_debug_excp_handler;
111
cc->do_unaligned_access = s390x_cpu_do_unaligned_access;
112
#endif
113
#endif
114
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
115
index XXXXXXX..XXXXXXX 100644
116
--- a/target/xtensa/cpu.c
117
+++ b/target/xtensa/cpu.c
118
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
119
cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
120
cc->do_transaction_failed = xtensa_cpu_do_transaction_failed;
121
#endif
122
- cc->debug_excp_handler = xtensa_breakpoint_handler;
123
+ cc->tcg_ops.debug_excp_handler = xtensa_breakpoint_handler;
124
cc->disas_set_info = xtensa_cpu_disas_set_info;
125
cc->tcg_ops.initialize = xtensa_translate_init;
126
dc->vmsd = &vmstate_xtensa_cpu;
127
--
75
--
128
2.25.1
76
2.43.0
129
130
diff view generated by jsdifflib
1
From: Claudio Fontana <cfontana@suse.de>
1
Stores have no output operands, and so need no further work.
2
2
3
add a new optional interface to CPUClass, which allows accelerators
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
to extend the CPUClass with additional accelerator-specific
5
initializations.
6
7
This will allow to separate the target cpu code that is specific
8
to each accelerator, and register it automatically with object
9
hierarchy lookup depending on accelerator code availability,
10
as part of the accel_init_interfaces() initialization step.
11
12
Signed-off-by: Claudio Fontana <cfontana@suse.de>
13
Message-Id: <20210204163931.7358-19-cfontana@suse.de>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
---
5
---
16
include/hw/core/accel-cpu.h | 38 ++++++++++++++++++++++++++++++++
6
tcg/optimize.c | 11 +++++------
17
include/hw/core/cpu.h | 4 ++++
7
1 file changed, 5 insertions(+), 6 deletions(-)
18
accel/accel-common.c | 44 +++++++++++++++++++++++++++++++++++++
19
MAINTAINERS | 1 +
20
4 files changed, 87 insertions(+)
21
create mode 100644 include/hw/core/accel-cpu.h
22
8
23
diff --git a/include/hw/core/accel-cpu.h b/include/hw/core/accel-cpu.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
24
new file mode 100644
25
index XXXXXXX..XXXXXXX
26
--- /dev/null
27
+++ b/include/hw/core/accel-cpu.h
28
@@ -XXX,XX +XXX,XX @@
29
+/*
30
+ * Accelerator interface, specializes CPUClass
31
+ * This header is used only by target-specific code.
32
+ *
33
+ * Copyright 2021 SUSE LLC
34
+ *
35
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
36
+ * See the COPYING file in the top-level directory.
37
+ */
38
+
39
+#ifndef ACCEL_CPU_H
40
+#define ACCEL_CPU_H
41
+
42
+/*
43
+ * This header is used to define new accelerator-specific target-specific
44
+ * accelerator cpu subclasses.
45
+ * It uses CPU_RESOLVING_TYPE, so this is clearly target-specific.
46
+ *
47
+ * Do not try to use for any other purpose than the implementation of new
48
+ * subclasses in target/, or the accel implementation itself in accel/
49
+ */
50
+
51
+#define TYPE_ACCEL_CPU "accel-" CPU_RESOLVING_TYPE
52
+#define ACCEL_CPU_NAME(name) (name "-" TYPE_ACCEL_CPU)
53
+typedef struct AccelCPUClass AccelCPUClass;
54
+DECLARE_CLASS_CHECKERS(AccelCPUClass, ACCEL_CPU, TYPE_ACCEL_CPU)
55
+
56
+typedef struct AccelCPUClass {
57
+ /*< private >*/
58
+ ObjectClass parent_class;
59
+ /*< public >*/
60
+
61
+ void (*cpu_class_init)(CPUClass *cc);
62
+ void (*cpu_instance_init)(CPUState *cpu);
63
+ void (*cpu_realizefn)(CPUState *cpu, Error **errp);
64
+} AccelCPUClass;
65
+
66
+#endif /* ACCEL_CPU_H */
67
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
68
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
69
--- a/include/hw/core/cpu.h
11
--- a/tcg/optimize.c
70
+++ b/include/hw/core/cpu.h
12
+++ b/tcg/optimize.c
71
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock;
13
@@ -XXX,XX +XXX,XX @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
72
/* see tcg-cpu-ops.h */
14
{
73
struct TCGCPUOps;
15
/* Opcodes that touch guest memory stop the mb optimization. */
74
16
ctx->prev_mb = NULL;
75
+/* see accel-cpu.h */
17
- return false;
76
+struct AccelCPUClass;
18
+ return true;
77
+
78
/**
79
* CPUClass:
80
* @class_by_name: Callback to map -cpu command line model name to an
81
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
82
/* Keep non-pointer data at the end to minimize holes. */
83
int gdb_num_core_regs;
84
bool gdb_stop_before_watchpoint;
85
+ struct AccelCPUClass *accel_cpu;
86
87
/* when TCG is not available, this pointer is NULL */
88
struct TCGCPUOps *tcg_ops;
89
diff --git a/accel/accel-common.c b/accel/accel-common.c
90
index XXXXXXX..XXXXXXX 100644
91
--- a/accel/accel-common.c
92
+++ b/accel/accel-common.c
93
@@ -XXX,XX +XXX,XX @@
94
#include "qemu/osdep.h"
95
#include "qemu/accel.h"
96
97
+#include "cpu.h"
98
+#include "hw/core/accel-cpu.h"
99
+
100
#ifndef CONFIG_USER_ONLY
101
#include "accel-softmmu.h"
102
#endif /* !CONFIG_USER_ONLY */
103
@@ -XXX,XX +XXX,XX @@ AccelClass *accel_find(const char *opt_name)
104
return ac;
105
}
19
}
106
20
107
+static void accel_init_cpu_int_aux(ObjectClass *klass, void *opaque)
21
static bool fold_remainder(OptContext *ctx, TCGOp *op)
108
+{
22
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
109
+ CPUClass *cc = CPU_CLASS(klass);
23
110
+ AccelCPUClass *accel_cpu = opaque;
24
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
111
+
25
remove_mem_copy_all(ctx);
112
+ cc->accel_cpu = accel_cpu;
26
- return false;
113
+ if (accel_cpu->cpu_class_init) {
27
+ return true;
114
+ accel_cpu->cpu_class_init(cc);
28
}
115
+ }
29
116
+}
30
switch (op->opc) {
117
+
31
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
118
+/* initialize the arch-specific accel CpuClass interfaces */
32
g_assert_not_reached();
119
+static void accel_init_cpu_interfaces(AccelClass *ac)
33
}
120
+{
34
remove_mem_copy_in(ctx, ofs, ofs + lm1);
121
+ const char *ac_name; /* AccelClass name */
35
- return false;
122
+ char *acc_name; /* AccelCPUClass name */
36
+ return true;
123
+ ObjectClass *acc; /* AccelCPUClass */
124
+
125
+ ac_name = object_class_get_name(OBJECT_CLASS(ac));
126
+ g_assert(ac_name != NULL);
127
+
128
+ acc_name = g_strdup_printf("%s-%s", ac_name, CPU_RESOLVING_TYPE);
129
+ acc = object_class_by_name(acc_name);
130
+ g_free(acc_name);
131
+
132
+ if (acc) {
133
+ object_class_foreach(accel_init_cpu_int_aux,
134
+ CPU_RESOLVING_TYPE, false, acc);
135
+ }
136
+}
137
+
138
void accel_init_interfaces(AccelClass *ac)
139
{
140
#ifndef CONFIG_USER_ONLY
141
accel_init_ops_interfaces(ac);
142
#endif /* !CONFIG_USER_ONLY */
143
+
144
+ accel_init_cpu_interfaces(ac);
145
}
37
}
146
38
147
+static const TypeInfo accel_cpu_type = {
39
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
148
+ .name = TYPE_ACCEL_CPU,
40
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
149
+ .parent = TYPE_OBJECT,
41
TCGType type;
150
+ .abstract = true,
42
151
+ .class_size = sizeof(AccelCPUClass),
43
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
152
+};
44
- fold_tcg_st(ctx, op);
153
+
45
- return false;
154
static void register_accel_types(void)
46
+ return fold_tcg_st(ctx, op);
155
{
47
}
156
type_register_static(&accel_type);
48
157
+ type_register_static(&accel_cpu_type);
49
src = arg_temp(op->args[0]);
50
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
51
last = ofs + tcg_type_size(type) - 1;
52
remove_mem_copy_in(ctx, ofs, last);
53
record_mem_copy(ctx, type, src, ofs, last);
54
- return false;
55
+ return true;
158
}
56
}
159
57
160
type_init(register_accel_types);
58
static bool fold_xor(OptContext *ctx, TCGOp *op)
161
diff --git a/MAINTAINERS b/MAINTAINERS
162
index XXXXXXX..XXXXXXX 100644
163
--- a/MAINTAINERS
164
+++ b/MAINTAINERS
165
@@ -XXX,XX +XXX,XX @@ R: Paolo Bonzini <pbonzini@redhat.com>
166
S: Maintained
167
F: include/qemu/accel.h
168
F: include/sysemu/accel-ops.h
169
+F: include/hw/core/accel-cpu.h
170
F: accel/accel-*.c
171
F: accel/Makefile.objs
172
F: accel/stubs/Makefile.objs
173
--
59
--
174
2.25.1
60
2.43.0
175
176
diff view generated by jsdifflib
New patch
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
tcg/optimize.c | 2 +-
5
1 file changed, 1 insertion(+), 1 deletion(-)
1
6
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
8
index XXXXXXX..XXXXXXX 100644
9
--- a/tcg/optimize.c
10
+++ b/tcg/optimize.c
11
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
12
fold_xx_to_i(ctx, op, 0)) {
13
return true;
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
17
}
18
19
static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
20
--
21
2.43.0
diff view generated by jsdifflib
New patch
1
Change return from bool to int; distinguish between
2
complete folding, simplification, and no change.
1
3
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/optimize.c | 22 ++++++++++++++--------
8
1 file changed, 14 insertions(+), 8 deletions(-)
9
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/optimize.c
13
+++ b/tcg/optimize.c
14
@@ -XXX,XX +XXX,XX @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
15
return finish_folding(ctx, op);
16
}
17
18
-static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
19
+/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
20
+static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
21
{
22
uint64_t a_zmask, b_val;
23
TCGCond cond;
24
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
25
op->opc = xor_opc;
26
op->args[2] = arg_new_constant(ctx, 1);
27
}
28
- return false;
29
+ return -1;
30
}
31
}
32
-
33
- return false;
34
+ return 0;
35
}
36
37
static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
38
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
39
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
40
}
41
42
- if (fold_setcond_zmask(ctx, op, false)) {
43
+ i = fold_setcond_zmask(ctx, op, false);
44
+ if (i > 0) {
45
return true;
46
}
47
- fold_setcond_tst_pow2(ctx, op, false);
48
+ if (i == 0) {
49
+ fold_setcond_tst_pow2(ctx, op, false);
50
+ }
51
52
ctx->z_mask = 1;
53
return false;
54
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
55
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
56
}
57
58
- if (fold_setcond_zmask(ctx, op, true)) {
59
+ i = fold_setcond_zmask(ctx, op, true);
60
+ if (i > 0) {
61
return true;
62
}
63
- fold_setcond_tst_pow2(ctx, op, true);
64
+ if (i == 0) {
65
+ fold_setcond_tst_pow2(ctx, op, true);
66
+ }
67
68
/* Value is {0,-1} so all bits are repetitions of the sign. */
69
ctx->s_mask = -1;
70
--
71
2.43.0
diff view generated by jsdifflib
New patch
1
Avoid the use of the OptContext slots.
1
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/optimize.c | 3 +--
7
1 file changed, 1 insertion(+), 2 deletions(-)
8
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/optimize.c
12
+++ b/tcg/optimize.c
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
14
fold_setcond_tst_pow2(ctx, op, false);
15
}
16
17
- ctx->z_mask = 1;
18
- return false;
19
+ return fold_masks_z(ctx, op, 1);
20
}
21
22
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
23
--
24
2.43.0
diff view generated by jsdifflib
1
From: Claudio Fontana <cfontana@suse.de>
1
Avoid the use of the OptContext slots.
2
2
3
for now only TCG is allowed as an accelerator for riscv,
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
so remove the CONFIG_TCG use.
5
6
Signed-off-by: Claudio Fontana <cfontana@suse.de>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Message-Id: <20210204163931.7358-3-cfontana@suse.de>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
5
---
12
target/riscv/cpu.c | 3 +--
6
tcg/optimize.c | 3 +--
13
1 file changed, 1 insertion(+), 2 deletions(-)
7
1 file changed, 1 insertion(+), 2 deletions(-)
14
8
15
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
16
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/cpu.c
11
--- a/tcg/optimize.c
18
+++ b/target/riscv/cpu.c
12
+++ b/tcg/optimize.c
19
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
20
#endif
14
}
21
cc->gdb_arch_name = riscv_gdb_arch_name;
15
22
cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
16
/* Value is {0,-1} so all bits are repetitions of the sign. */
23
-#ifdef CONFIG_TCG
17
- ctx->s_mask = -1;
24
cc->tcg_ops.initialize = riscv_translate_init;
18
- return false;
25
cc->tlb_fill = riscv_cpu_tlb_fill;
19
+ return fold_masks_s(ctx, op, -1);
26
-#endif
27
+
28
device_class_set_props(dc, riscv_cpu_properties);
29
}
20
}
30
21
22
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
31
--
23
--
32
2.25.1
24
2.43.0
33
34
diff view generated by jsdifflib
1
As noted in several comments, 8 regs is not enough for 32-bit
1
Avoid the use of the OptContext slots.
2
to perform calls, as currently implemented. Shortly, we will
3
rearrange the encoding which will make 32 regs impossible.
4
2
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
tcg/tci/tcg-target.h | 32 +++++---------------------------
6
tcg/optimize.c | 3 +--
9
tcg/tci/tcg-target.c.inc | 26 --------------------------
7
1 file changed, 1 insertion(+), 2 deletions(-)
10
2 files changed, 5 insertions(+), 53 deletions(-)
11
8
12
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/tci/tcg-target.h
11
--- a/tcg/optimize.c
15
+++ b/tcg/tci/tcg-target.h
12
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@
13
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
17
#define TCG_TARGET_HAS_mulu2_i32 1
14
return fold_setcond(ctx, op);
18
#endif /* TCG_TARGET_REG_BITS == 64 */
15
}
19
16
20
-/* Number of registers available.
17
- ctx->z_mask = 1;
21
- For 32 bit hosts, we need more than 8 registers (call arguments). */
18
- return false;
22
-/* #define TCG_TARGET_NB_REGS 8 */
19
+ return fold_masks_z(ctx, op, 1);
23
+/* Number of registers available. */
20
24
#define TCG_TARGET_NB_REGS 16
21
do_setcond_const:
25
-/* #define TCG_TARGET_NB_REGS 32 */
22
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
26
27
/* List of registers which are used by TCG. */
28
typedef enum {
29
@@ -XXX,XX +XXX,XX @@ typedef enum {
30
TCG_REG_R5,
31
TCG_REG_R6,
32
TCG_REG_R7,
33
-#if TCG_TARGET_NB_REGS >= 16
34
TCG_REG_R8,
35
TCG_REG_R9,
36
TCG_REG_R10,
37
@@ -XXX,XX +XXX,XX @@ typedef enum {
38
TCG_REG_R13,
39
TCG_REG_R14,
40
TCG_REG_R15,
41
-#if TCG_TARGET_NB_REGS >= 32
42
- TCG_REG_R16,
43
- TCG_REG_R17,
44
- TCG_REG_R18,
45
- TCG_REG_R19,
46
- TCG_REG_R20,
47
- TCG_REG_R21,
48
- TCG_REG_R22,
49
- TCG_REG_R23,
50
- TCG_REG_R24,
51
- TCG_REG_R25,
52
- TCG_REG_R26,
53
- TCG_REG_R27,
54
- TCG_REG_R28,
55
- TCG_REG_R29,
56
- TCG_REG_R30,
57
- TCG_REG_R31,
58
-#endif
59
-#endif
60
+
61
+ TCG_AREG0 = TCG_REG_R14,
62
+ TCG_REG_CALL_STACK = TCG_REG_R15,
63
+
64
/* Special value UINT8_MAX is used by TCI to encode constant values. */
65
TCG_CONST = UINT8_MAX
66
} TCGReg;
67
68
-#define TCG_AREG0 (TCG_TARGET_NB_REGS - 2)
69
-
70
/* Used for function call generation. */
71
-#define TCG_REG_CALL_STACK (TCG_TARGET_NB_REGS - 1)
72
#define TCG_TARGET_CALL_STACK_OFFSET 0
73
#define TCG_TARGET_STACK_ALIGN 16
74
75
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
76
index XXXXXXX..XXXXXXX 100644
77
--- a/tcg/tci/tcg-target.c.inc
78
+++ b/tcg/tci/tcg-target.c.inc
79
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_reg_alloc_order[] = {
80
TCG_REG_R5,
81
TCG_REG_R6,
82
TCG_REG_R7,
83
-#if TCG_TARGET_NB_REGS >= 16
84
TCG_REG_R8,
85
TCG_REG_R9,
86
TCG_REG_R10,
87
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_reg_alloc_order[] = {
88
TCG_REG_R13,
89
TCG_REG_R14,
90
TCG_REG_R15,
91
-#endif
92
};
93
94
#if MAX_OPC_PARAM_IARGS != 6
95
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_call_iarg_regs[] = {
96
#if TCG_TARGET_REG_BITS == 32
97
/* 32 bit hosts need 2 * MAX_OPC_PARAM_IARGS registers. */
98
TCG_REG_R7,
99
-#if TCG_TARGET_NB_REGS >= 16
100
TCG_REG_R8,
101
TCG_REG_R9,
102
TCG_REG_R10,
103
TCG_REG_R11,
104
TCG_REG_R12,
105
-#else
106
-# error Too few input registers available
107
-#endif
108
#endif
109
};
110
111
@@ -XXX,XX +XXX,XX @@ static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
112
"r05",
113
"r06",
114
"r07",
115
-#if TCG_TARGET_NB_REGS >= 16
116
"r08",
117
"r09",
118
"r10",
119
@@ -XXX,XX +XXX,XX @@ static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
120
"r13",
121
"r14",
122
"r15",
123
-#if TCG_TARGET_NB_REGS >= 32
124
- "r16",
125
- "r17",
126
- "r18",
127
- "r19",
128
- "r20",
129
- "r21",
130
- "r22",
131
- "r23",
132
- "r24",
133
- "r25",
134
- "r26",
135
- "r27",
136
- "r28",
137
- "r29",
138
- "r30",
139
- "r31"
140
-#endif
141
-#endif
142
};
143
#endif
144
145
--
23
--
146
2.25.1
24
2.43.0
147
148
diff view generated by jsdifflib
1
From: Claudio Fontana <cfontana@suse.de>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
3
cc->do_interrupt is in theory a TCG callback used in accel/tcg only,
4
to prepare the emulated architecture to take an interrupt as defined
5
in the hardware specifications,
6
7
but in reality the _do_interrupt style of functions in targets are
8
also occasionally reused by KVM to prepare the architecture state in a
9
similar way where userspace code has identified that it needs to
10
deliver an exception to the guest.
11
12
In the case of ARM, that includes:
13
14
1) the vcpu thread got a SIGBUS indicating a memory error,
15
and we need to deliver a Synchronous External Abort to the guest to
16
let it know about the error.
17
2) the kernel told us about a debug exception (breakpoint, watchpoint)
18
but it is not for one of QEMU's own gdbstub breakpoints/watchpoints
19
so it must be a breakpoint the guest itself has set up, therefore
20
we need to deliver it to the guest.
21
22
So in order to reuse code, the same arm_do_interrupt function is used.
23
This is all fine, but we need to avoid calling it using the callback
24
registered in CPUClass, since that one is now TCG-only.
25
26
Fortunately this is easily solved by replacing calls to
27
CPUClass::do_interrupt() with explicit calls to arm_do_interrupt().
28
29
Signed-off-by: Claudio Fontana <cfontana@suse.de>
30
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
31
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
32
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
33
Cc: Peter Maydell <peter.maydell@linaro.org>
34
Message-Id: <20210204163931.7358-9-cfontana@suse.de>
35
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
36
---
3
---
37
target/arm/helper.c | 4 ++++
4
tcg/optimize.c | 2 +-
38
target/arm/kvm64.c | 6 ++----
5
1 file changed, 1 insertion(+), 1 deletion(-)
39
2 files changed, 6 insertions(+), 4 deletions(-)
40
6
41
diff --git a/target/arm/helper.c b/target/arm/helper.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
42
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
43
--- a/target/arm/helper.c
9
--- a/tcg/optimize.c
44
+++ b/target/arm/helper.c
10
+++ b/tcg/optimize.c
45
@@ -XXX,XX +XXX,XX @@ static void handle_semihosting(CPUState *cs)
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
46
* Do any appropriate logging, handle PSCI calls, and then hand off
12
if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
47
* to the AArch64-entry or AArch32-entry function depending on the
13
op->args[3] = tcg_swap_cond(op->args[3]);
48
* target exception level's register width.
14
}
49
+ *
15
- return false;
50
+ * Note: this is used for both TCG (as the do_interrupt tcg op),
16
+ return finish_folding(ctx, op);
51
+ * and KVM to re-inject guest debug exceptions, and to
52
+ * inject a Synchronous-External-Abort.
53
*/
54
void arm_cpu_do_interrupt(CPUState *cs)
55
{
56
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
57
index XXXXXXX..XXXXXXX 100644
58
--- a/target/arm/kvm64.c
59
+++ b/target/arm/kvm64.c
60
@@ -XXX,XX +XXX,XX @@ static void kvm_inject_arm_sea(CPUState *c)
61
{
62
ARMCPU *cpu = ARM_CPU(c);
63
CPUARMState *env = &cpu->env;
64
- CPUClass *cc = CPU_GET_CLASS(c);
65
uint32_t esr;
66
bool same_el;
67
68
@@ -XXX,XX +XXX,XX @@ static void kvm_inject_arm_sea(CPUState *c)
69
70
env->exception.syndrome = esr;
71
72
- cc->do_interrupt(c);
73
+ arm_cpu_do_interrupt(c);
74
}
17
}
75
18
76
#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
19
static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
77
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
78
{
79
int hsr_ec = syn_get_ec(debug_exit->hsr);
80
ARMCPU *cpu = ARM_CPU(cs);
81
- CPUClass *cc = CPU_GET_CLASS(cs);
82
CPUARMState *env = &cpu->env;
83
84
/* Ensure PC is synchronised */
85
@@ -XXX,XX +XXX,XX @@ bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
86
env->exception.vaddress = debug_exit->far;
87
env->exception.target_el = 1;
88
qemu_mutex_lock_iothread();
89
- cc->do_interrupt(cs);
90
+ arm_cpu_do_interrupt(cs);
91
qemu_mutex_unlock_iothread();
92
93
return false;
94
--
20
--
95
2.25.1
21
2.43.0
96
97
diff view generated by jsdifflib
1
Tested-by: Alex Bennée <alex.bennee@linaro.org>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
3
---
5
tcg/tci.c | 8 --------
4
tcg/optimize.c | 2 +-
6
1 file changed, 8 deletions(-)
5
1 file changed, 1 insertion(+), 1 deletion(-)
7
6
8
diff --git a/tcg/tci.c b/tcg/tci.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/tci.c
9
--- a/tcg/optimize.c
11
+++ b/tcg/tci.c
10
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@
11
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
13
#include "tcg/tcg-op.h"
12
if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
14
#include "qemu/compiler.h"
13
op->args[5] = tcg_invert_cond(op->args[5]);
15
14
}
16
-/* Marker for missing code. */
15
- return false;
17
-#define TODO() \
16
+ return finish_folding(ctx, op);
18
- do { \
17
}
19
- fprintf(stderr, "TODO %s:%u: %s()\n", \
18
20
- __FILE__, __LINE__, __func__); \
19
static bool fold_sextract(OptContext *ctx, TCGOp *op)
21
- tcg_abort(); \
22
- } while (0)
23
-
24
#if MAX_OPC_PARAM_IARGS != 6
25
# error Fix needed, number of supported input arguments changed!
26
#endif
27
--
20
--
28
2.25.1
21
2.43.0
29
30
diff view generated by jsdifflib
1
Tested-by: Alex Bennée <alex.bennee@linaro.org>
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
2
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
---
5
tcg/tci.c | 10 +---------
6
tcg/optimize.c | 24 +++++++++---------------
6
1 file changed, 1 insertion(+), 9 deletions(-)
7
1 file changed, 9 insertions(+), 15 deletions(-)
7
8
8
diff --git a/tcg/tci.c b/tcg/tci.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/tci.c
11
--- a/tcg/optimize.c
11
+++ b/tcg/tci.c
12
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
13
regs[index] = value;
14
static bool fold_sextract(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t z_mask, s_mask, s_mask_old;
17
+ TempOptInfo *t1 = arg_info(op->args[1]);
18
int pos = op->args[2];
19
int len = op->args[3];
20
21
- if (arg_is_const(op->args[1])) {
22
- uint64_t t;
23
-
24
- t = arg_info(op->args[1])->val;
25
- t = sextract64(t, pos, len);
26
- return tcg_opt_gen_movi(ctx, op, op->args[0], t);
27
+ if (ti_is_const(t1)) {
28
+ return tcg_opt_gen_movi(ctx, op, op->args[0],
29
+ sextract64(ti_const_val(t1), pos, len));
30
}
31
32
- z_mask = arg_info(op->args[1])->z_mask;
33
- z_mask = sextract64(z_mask, pos, len);
34
- ctx->z_mask = z_mask;
35
-
36
- s_mask_old = arg_info(op->args[1])->s_mask;
37
- s_mask = sextract64(s_mask_old, pos, len);
38
- s_mask |= MAKE_64BIT_MASK(len, 64 - len);
39
- ctx->s_mask = s_mask;
40
+ s_mask_old = t1->s_mask;
41
+ s_mask = s_mask_old >> pos;
42
+ s_mask |= -1ull << (len - 1);
43
44
if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
45
return true;
46
}
47
48
- return fold_masks(ctx, op);
49
+ z_mask = sextract64(t1->z_mask, pos, len);
50
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
14
}
51
}
15
52
16
-#if TCG_TARGET_REG_BITS == 64
53
static bool fold_shift(OptContext *ctx, TCGOp *op)
17
-static void
18
-tci_write_reg16(tcg_target_ulong *regs, TCGReg index, uint16_t value)
19
-{
20
- tci_write_reg(regs, index, value);
21
-}
22
-#endif
23
-
24
static void
25
tci_write_reg32(tcg_target_ulong *regs, TCGReg index, uint32_t value)
26
{
27
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
28
t0 = *tb_ptr++;
29
t1 = tci_read_r(regs, &tb_ptr);
30
t2 = tci_read_s32(&tb_ptr);
31
- tci_write_reg16(regs, t0, *(uint16_t *)(t1 + t2));
32
+ tci_write_reg(regs, t0, *(uint16_t *)(t1 + t2));
33
break;
34
case INDEX_op_ld16s_i64:
35
TODO();
36
--
54
--
37
2.25.1
55
2.43.0
38
39
diff view generated by jsdifflib
1
Three TODO instances are never happen cases.
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Other uses of tcg_abort are also indicating unreachable cases.
3
2
4
Tested-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Reviewed-by: Stefan Weil <sw@weilnetz.de>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
5
---
9
tcg/tci.c | 15 +++++++--------
6
tcg/optimize.c | 27 ++++++++++++++-------------
10
1 file changed, 7 insertions(+), 8 deletions(-)
7
1 file changed, 14 insertions(+), 13 deletions(-)
11
8
12
diff --git a/tcg/tci.c b/tcg/tci.c
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/tci.c
11
--- a/tcg/optimize.c
15
+++ b/tcg/tci.c
12
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
17
result = (u0 > u1);
14
static bool fold_shift(OptContext *ctx, TCGOp *op)
15
{
16
uint64_t s_mask, z_mask, sign;
17
+ TempOptInfo *t1, *t2;
18
19
if (fold_const2(ctx, op) ||
20
fold_ix_to_i(ctx, op, 0) ||
21
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
22
return true;
23
}
24
25
- s_mask = arg_info(op->args[1])->s_mask;
26
- z_mask = arg_info(op->args[1])->z_mask;
27
+ t1 = arg_info(op->args[1]);
28
+ t2 = arg_info(op->args[2]);
29
+ s_mask = t1->s_mask;
30
+ z_mask = t1->z_mask;
31
32
- if (arg_is_const(op->args[2])) {
33
- int sh = arg_info(op->args[2])->val;
34
-
35
- ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
36
+ if (ti_is_const(t2)) {
37
+ int sh = ti_const_val(t2);
38
39
+ z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
40
s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
41
42
- return fold_masks(ctx, op);
43
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
44
}
45
46
switch (op->opc) {
47
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
48
* Arithmetic right shift will not reduce the number of
49
* input sign repetitions.
50
*/
51
- ctx->s_mask = s_mask;
52
- break;
53
+ return fold_masks_s(ctx, op, s_mask);
54
CASE_OP_32_64(shr):
55
/*
56
* If the sign bit is known zero, then logical right shift
57
- * will not reduced the number of input sign repetitions.
58
+ * will not reduce the number of input sign repetitions.
59
*/
60
- sign = (s_mask & -s_mask) >> 1;
61
+ sign = -s_mask;
62
if (sign && !(z_mask & sign)) {
63
- ctx->s_mask = s_mask;
64
+ return fold_masks_s(ctx, op, s_mask);
65
}
18
break;
66
break;
19
default:
67
default:
20
- TODO();
68
break;
21
+ g_assert_not_reached();
22
}
69
}
23
return result;
70
71
- return false;
72
+ return finish_folding(ctx, op);
24
}
73
}
25
@@ -XXX,XX +XXX,XX @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
74
26
result = (u0 > u1);
75
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
27
break;
28
default:
29
- TODO();
30
+ g_assert_not_reached();
31
}
32
return result;
33
}
34
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
35
tmp32 = qemu_ld_beul;
36
break;
37
default:
38
- tcg_abort();
39
+ g_assert_not_reached();
40
}
41
tci_write_reg(regs, t0, tmp32);
42
break;
43
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
44
tmp64 = qemu_ld_beq;
45
break;
46
default:
47
- tcg_abort();
48
+ g_assert_not_reached();
49
}
50
tci_write_reg(regs, t0, tmp64);
51
if (TCG_TARGET_REG_BITS == 32) {
52
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
53
qemu_st_bel(t0);
54
break;
55
default:
56
- tcg_abort();
57
+ g_assert_not_reached();
58
}
59
break;
60
case INDEX_op_qemu_st_i64:
61
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
62
qemu_st_beq(tmp64);
63
break;
64
default:
65
- tcg_abort();
66
+ g_assert_not_reached();
67
}
68
break;
69
case INDEX_op_mb:
70
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
71
smp_mb();
72
break;
73
default:
74
- TODO();
75
- break;
76
+ g_assert_not_reached();
77
}
78
tci_assert(tb_ptr == old_code_ptr + op_size);
79
}
80
--
76
--
81
2.25.1
77
2.43.0
82
83
diff view generated by jsdifflib
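
As a side note on the mask handling above: when the shift count is constant,
the known-bits masks are simply shifted the same way the value would be,
which is what the do_constant_folding() call computes. A standalone sketch of
that arithmetic (variable names are illustrative, not QEMU's):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* z_mask: a result bit may be nonzero only where the mask is set. */
    uint64_t z_mask = 0x00ff;   /* value known to fit in the low byte */
    int sh = 4;

    /* shl by a constant moves the possibly-nonzero bits up... */
    assert((z_mask << sh) == 0x0ff0);
    /* ...and shr moves them down, making the high bits known zero. */
    assert((z_mask >> sh) == 0x000f);
    return 0;
}
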
1
Trivially implemented like other arithmetic.
1
Merge the two conditions, sign != 0 && !(z_mask & sign),
2
Tested via check-tcg and the ppc64 target.
2
by testing ~z_mask & sign. If sign == 0, the logical and
3
will produce false.
3
4
4
Tested-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
tcg/tci/tcg-target.h | 4 ++--
8
tcg/optimize.c | 5 ++---
9
tcg/tci.c | 28 ++++++++++++++++++++++------
9
1 file changed, 2 insertions(+), 3 deletions(-)
10
tcg/tci/tcg-target.c.inc | 10 ++++------
11
3 files changed, 28 insertions(+), 14 deletions(-)
12
10
13
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
11
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/tci/tcg-target.h
13
--- a/tcg/optimize.c
16
+++ b/tcg/tci/tcg-target.h
14
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
18
#define TCG_TARGET_HAS_extract_i64 0
16
19
#define TCG_TARGET_HAS_sextract_i64 0
17
static bool fold_shift(OptContext *ctx, TCGOp *op)
20
#define TCG_TARGET_HAS_extract2_i64 0
18
{
21
-#define TCG_TARGET_HAS_div_i64 0
19
- uint64_t s_mask, z_mask, sign;
22
-#define TCG_TARGET_HAS_rem_i64 0
20
+ uint64_t s_mask, z_mask;
23
+#define TCG_TARGET_HAS_div_i64 1
21
TempOptInfo *t1, *t2;
24
+#define TCG_TARGET_HAS_rem_i64 1
22
25
#define TCG_TARGET_HAS_ext8s_i64 1
23
if (fold_const2(ctx, op) ||
26
#define TCG_TARGET_HAS_ext16s_i64 1
24
@@ -XXX,XX +XXX,XX @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
27
#define TCG_TARGET_HAS_ext32s_i64 1
25
* If the sign bit is known zero, then logical right shift
28
diff --git a/tcg/tci.c b/tcg/tci.c
26
* will not reduce the number of input sign repetitions.
29
index XXXXXXX..XXXXXXX 100644
27
*/
30
--- a/tcg/tci.c
28
- sign = -s_mask;
31
+++ b/tcg/tci.c
29
- if (sign && !(z_mask & sign)) {
32
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
30
+ if (~z_mask & -s_mask) {
33
t2 = tci_read_ri64(regs, &tb_ptr);
31
return fold_masks_s(ctx, op, s_mask);
34
tci_write_reg(regs, t0, t1 * t2);
32
}
35
break;
36
-#if TCG_TARGET_HAS_div_i64
37
case INDEX_op_div_i64:
38
- case INDEX_op_divu_i64:
39
- case INDEX_op_rem_i64:
40
- case INDEX_op_remu_i64:
41
- TODO();
42
+ t0 = *tb_ptr++;
43
+ t1 = tci_read_ri64(regs, &tb_ptr);
44
+ t2 = tci_read_ri64(regs, &tb_ptr);
45
+ tci_write_reg(regs, t0, (int64_t)t1 / (int64_t)t2);
46
+ break;
47
+ case INDEX_op_divu_i64:
48
+ t0 = *tb_ptr++;
49
+ t1 = tci_read_ri64(regs, &tb_ptr);
50
+ t2 = tci_read_ri64(regs, &tb_ptr);
51
+ tci_write_reg(regs, t0, (uint64_t)t1 / (uint64_t)t2);
52
+ break;
53
+ case INDEX_op_rem_i64:
54
+ t0 = *tb_ptr++;
55
+ t1 = tci_read_ri64(regs, &tb_ptr);
56
+ t2 = tci_read_ri64(regs, &tb_ptr);
57
+ tci_write_reg(regs, t0, (int64_t)t1 % (int64_t)t2);
58
+ break;
59
+ case INDEX_op_remu_i64:
60
+ t0 = *tb_ptr++;
61
+ t1 = tci_read_ri64(regs, &tb_ptr);
62
+ t2 = tci_read_ri64(regs, &tb_ptr);
63
+ tci_write_reg(regs, t0, (uint64_t)t1 % (uint64_t)t2);
64
break;
65
-#endif
66
case INDEX_op_and_i64:
67
t0 = *tb_ptr++;
68
t1 = tci_read_ri64(regs, &tb_ptr);
69
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
70
index XXXXXXX..XXXXXXX 100644
71
--- a/tcg/tci/tcg-target.c.inc
72
+++ b/tcg/tci/tcg-target.c.inc
73
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
74
case INDEX_op_sar_i64:
75
case INDEX_op_rotl_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */
76
case INDEX_op_rotr_i64: /* Optional (TCG_TARGET_HAS_rot_i64). */
77
+ case INDEX_op_div_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
78
+ case INDEX_op_divu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
79
+ case INDEX_op_rem_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
80
+ case INDEX_op_remu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
81
tcg_out_r(s, args[0]);
82
tcg_out_ri64(s, const_args[1], args[1]);
83
tcg_out_ri64(s, const_args[2], args[2]);
84
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
85
tcg_debug_assert(args[4] <= UINT8_MAX);
86
tcg_out8(s, args[4]);
87
break;
33
break;
88
- case INDEX_op_div_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
89
- case INDEX_op_divu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
90
- case INDEX_op_rem_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
91
- case INDEX_op_remu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
92
- TODO();
93
- break;
94
case INDEX_op_brcond_i64:
95
tcg_out_r(s, args[0]);
96
tcg_out_ri64(s, const_args[1], args[1]);
97
--
34
--
98
2.25.1
35
2.43.0
99
100
diff view generated by jsdifflib
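
The condition merge above can be sanity-checked in isolation. With the new
representation, s_mask is a run of high bits, so -s_mask isolates the lowest
repeated sign bit (a single bit, or zero); under that assumption the two
forms agree. A standalone sketch, not QEMU code:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t s_mask = 0xffffffffffffff80ull; /* sign repeated in bits 7..63 */
    uint64_t sign = -s_mask;                 /* == 0x80, a single bit */
    assert(sign == 0x80);

    for (uint64_t z_mask = 0; z_mask < 0x200; z_mask++) {
        int old_form = sign != 0 && !(z_mask & sign);
        int new_form = (~z_mask & sign) != 0;
        assert(old_form == new_form);
    }

    /* With no known sign repetitions, both forms are false. */
    sign = -(uint64_t)0;
    assert(sign == 0 && (~0ull & sign) == 0);
    return 0;
}
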
1
From: Eduardo Habkost <ehabkost@redhat.com>
1
Duplicate fold_sub_vec into fold_sub instead of calling it,
2
now that fold_sub_vec always returns true.
2
3
3
[claudio: wrapped target code in CONFIG_TCG]
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
5
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
6
Signed-off-by: Claudio Fontana <cfontana@suse.de>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-Id: <20210204163931.7358-7-cfontana@suse.de>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
6
---
13
include/hw/core/cpu.h | 21 ++++++++++++---------
7
tcg/optimize.c | 9 ++++++---
14
accel/tcg/cputlb.c | 7 ++++---
8
1 file changed, 6 insertions(+), 3 deletions(-)
15
accel/tcg/user-exec.c | 6 +++---
16
target/alpha/cpu.c | 2 +-
17
target/arm/cpu.c | 2 +-
18
target/avr/cpu.c | 2 +-
19
target/cris/cpu.c | 2 +-
20
target/hppa/cpu.c | 2 +-
21
target/i386/tcg/tcg-cpu.c | 2 +-
22
target/lm32/cpu.c | 2 +-
23
target/m68k/cpu.c | 2 +-
24
target/microblaze/cpu.c | 2 +-
25
target/mips/cpu.c | 2 +-
26
target/moxie/cpu.c | 2 +-
27
target/nios2/cpu.c | 2 +-
28
target/openrisc/cpu.c | 2 +-
29
target/riscv/cpu.c | 2 +-
30
target/rx/cpu.c | 2 +-
31
target/s390x/cpu.c | 2 +-
32
target/sh4/cpu.c | 2 +-
33
target/sparc/cpu.c | 2 +-
34
target/tilegx/cpu.c | 2 +-
35
target/tricore/cpu.c | 2 +-
36
target/unicore32/cpu.c | 2 +-
37
target/xtensa/cpu.c | 2 +-
38
target/ppc/translate_init.c.inc | 2 +-
39
26 files changed, 42 insertions(+), 38 deletions(-)
40
9
41
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
42
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
43
--- a/include/hw/core/cpu.h
12
--- a/tcg/optimize.c
44
+++ b/include/hw/core/cpu.h
13
+++ b/tcg/optimize.c
45
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
14
@@ -XXX,XX +XXX,XX @@ static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
46
void (*cpu_exec_exit)(CPUState *cpu);
15
fold_sub_to_neg(ctx, op)) {
47
/** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
16
return true;
48
bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
17
}
49
+ /**
18
- return false;
50
+ * @tlb_fill: Handle a softmmu tlb miss or user-only address fault
19
+ return finish_folding(ctx, op);
51
+ *
52
+ * For system mode, if the access is valid, call tlb_set_page
53
+ * and return true; if the access is invalid, and probe is
54
+ * true, return false; otherwise raise an exception and do
55
+ * not return. For user-only mode, always raise an exception
56
+ * and do not return.
57
+ */
58
+ bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
59
+ MMUAccessType access_type, int mmu_idx,
60
+ bool probe, uintptr_t retaddr);
61
62
} TcgCpuOperations;
63
64
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
65
* If the target behaviour here is anything other than "set
66
* the PC register to the value passed in" then the target must
67
* also implement the synchronize_from_tb hook.
68
- * @tlb_fill: Callback for handling a softmmu tlb miss or user-only
69
- * address fault. For system mode, if the access is valid, call
70
- * tlb_set_page and return true; if the access is invalid, and
71
- * probe is true, return false; otherwise raise an exception and
72
- * do not return. For user-only mode, always raise an exception
73
- * and do not return.
74
* @get_phys_page_debug: Callback for obtaining a physical address.
75
* @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
76
* associated memory transaction attributes to use for the access.
77
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
78
void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
79
Error **errp);
80
void (*set_pc)(CPUState *cpu, vaddr value);
81
- bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
82
- MMUAccessType access_type, int mmu_idx,
83
- bool probe, uintptr_t retaddr);
84
hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
85
hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
86
MemTxAttrs *attrs);
87
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
88
index XXXXXXX..XXXXXXX 100644
89
--- a/accel/tcg/cputlb.c
90
+++ b/accel/tcg/cputlb.c
91
@@ -XXX,XX +XXX,XX @@ static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
92
* This is not a probe, so only valid return is success; failure
93
* should result in exception + longjmp to the cpu loop.
94
*/
95
- ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
96
+ ok = cc->tcg_ops.tlb_fill(cpu, addr, size,
97
+ access_type, mmu_idx, false, retaddr);
98
assert(ok);
99
}
20
}
100
21
101
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
22
static bool fold_sub(OptContext *ctx, TCGOp *op)
102
CPUState *cs = env_cpu(env);
23
{
103
CPUClass *cc = CPU_GET_CLASS(cs);
24
- if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
104
25
+ if (fold_const2(ctx, op) ||
105
- if (!cc->tlb_fill(cs, addr, fault_size, access_type,
26
+ fold_xx_to_i(ctx, op, 0) ||
106
- mmu_idx, nonfault, retaddr)) {
27
+ fold_xi_to_x(ctx, op, 0) ||
107
+ if (!cc->tcg_ops.tlb_fill(cs, addr, fault_size, access_type,
28
+ fold_sub_to_neg(ctx, op)) {
108
+ mmu_idx, nonfault, retaddr)) {
29
return true;
109
/* Non-faulting page table read failed. */
30
}
110
*phost = NULL;
31
111
return TLB_INVALID_MASK;
32
@@ -XXX,XX +XXX,XX @@ static bool fold_sub(OptContext *ctx, TCGOp *op)
112
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
33
? INDEX_op_add_i32 : INDEX_op_add_i64);
113
index XXXXXXX..XXXXXXX 100644
34
op->args[2] = arg_new_constant(ctx, -val);
114
--- a/accel/tcg/user-exec.c
35
}
115
+++ b/accel/tcg/user-exec.c
36
- return false;
116
@@ -XXX,XX +XXX,XX @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
37
+ return finish_folding(ctx, op);
117
clear_helper_retaddr();
118
119
cc = CPU_GET_CLASS(cpu);
120
- cc->tlb_fill(cpu, address, 0, access_type, MMU_USER_IDX, false, pc);
121
+ cc->tcg_ops.tlb_fill(cpu, address, 0, access_type, MMU_USER_IDX, false, pc);
122
g_assert_not_reached();
123
}
38
}
124
39
125
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
40
static bool fold_sub2(OptContext *ctx, TCGOp *op)
126
} else {
127
CPUState *cpu = env_cpu(env);
128
CPUClass *cc = CPU_GET_CLASS(cpu);
129
- cc->tlb_fill(cpu, addr, fault_size, access_type,
130
- MMU_USER_IDX, false, ra);
131
+ cc->tcg_ops.tlb_fill(cpu, addr, fault_size, access_type,
132
+ MMU_USER_IDX, false, ra);
133
g_assert_not_reached();
134
}
135
}
136
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
137
index XXXXXXX..XXXXXXX 100644
138
--- a/target/alpha/cpu.c
139
+++ b/target/alpha/cpu.c
140
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
141
cc->set_pc = alpha_cpu_set_pc;
142
cc->gdb_read_register = alpha_cpu_gdb_read_register;
143
cc->gdb_write_register = alpha_cpu_gdb_write_register;
144
- cc->tlb_fill = alpha_cpu_tlb_fill;
145
+ cc->tcg_ops.tlb_fill = alpha_cpu_tlb_fill;
146
#ifndef CONFIG_USER_ONLY
147
cc->do_transaction_failed = alpha_cpu_do_transaction_failed;
148
cc->do_unaligned_access = alpha_cpu_do_unaligned_access;
149
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
150
index XXXXXXX..XXXXXXX 100644
151
--- a/target/arm/cpu.c
152
+++ b/target/arm/cpu.c
153
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
154
cc->tcg_ops.initialize = arm_translate_init;
155
cc->tcg_ops.cpu_exec_interrupt = arm_cpu_exec_interrupt;
156
cc->tcg_ops.synchronize_from_tb = arm_cpu_synchronize_from_tb;
157
- cc->tlb_fill = arm_cpu_tlb_fill;
158
+ cc->tcg_ops.tlb_fill = arm_cpu_tlb_fill;
159
cc->debug_excp_handler = arm_debug_excp_handler;
160
cc->debug_check_watchpoint = arm_debug_check_watchpoint;
161
cc->do_unaligned_access = arm_cpu_do_unaligned_access;
162
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
163
index XXXXXXX..XXXXXXX 100644
164
--- a/target/avr/cpu.c
165
+++ b/target/avr/cpu.c
166
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
167
cc->set_pc = avr_cpu_set_pc;
168
cc->memory_rw_debug = avr_cpu_memory_rw_debug;
169
cc->get_phys_page_debug = avr_cpu_get_phys_page_debug;
170
- cc->tlb_fill = avr_cpu_tlb_fill;
171
+ cc->tcg_ops.tlb_fill = avr_cpu_tlb_fill;
172
cc->vmsd = &vms_avr_cpu;
173
cc->disas_set_info = avr_cpu_disas_set_info;
174
cc->tcg_ops.initialize = avr_cpu_tcg_init;
175
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
176
index XXXXXXX..XXXXXXX 100644
177
--- a/target/cris/cpu.c
178
+++ b/target/cris/cpu.c
179
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
180
cc->set_pc = cris_cpu_set_pc;
181
cc->gdb_read_register = cris_cpu_gdb_read_register;
182
cc->gdb_write_register = cris_cpu_gdb_write_register;
183
- cc->tlb_fill = cris_cpu_tlb_fill;
184
+ cc->tcg_ops.tlb_fill = cris_cpu_tlb_fill;
185
#ifndef CONFIG_USER_ONLY
186
cc->get_phys_page_debug = cris_cpu_get_phys_page_debug;
187
dc->vmsd = &vmstate_cris_cpu;
188
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
189
index XXXXXXX..XXXXXXX 100644
190
--- a/target/hppa/cpu.c
191
+++ b/target/hppa/cpu.c
192
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
193
cc->tcg_ops.synchronize_from_tb = hppa_cpu_synchronize_from_tb;
194
cc->gdb_read_register = hppa_cpu_gdb_read_register;
195
cc->gdb_write_register = hppa_cpu_gdb_write_register;
196
- cc->tlb_fill = hppa_cpu_tlb_fill;
197
+ cc->tcg_ops.tlb_fill = hppa_cpu_tlb_fill;
198
#ifndef CONFIG_USER_ONLY
199
cc->get_phys_page_debug = hppa_cpu_get_phys_page_debug;
200
dc->vmsd = &vmstate_hppa_cpu;
201
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
202
index XXXXXXX..XXXXXXX 100644
203
--- a/target/i386/tcg/tcg-cpu.c
204
+++ b/target/i386/tcg/tcg-cpu.c
205
@@ -XXX,XX +XXX,XX @@ void tcg_cpu_common_class_init(CPUClass *cc)
206
cc->tcg_ops.cpu_exec_enter = x86_cpu_exec_enter;
207
cc->tcg_ops.cpu_exec_exit = x86_cpu_exec_exit;
208
cc->tcg_ops.initialize = tcg_x86_init;
209
- cc->tlb_fill = x86_cpu_tlb_fill;
210
+ cc->tcg_ops.tlb_fill = x86_cpu_tlb_fill;
211
#ifndef CONFIG_USER_ONLY
212
cc->debug_excp_handler = breakpoint_handler;
213
#endif
214
diff --git a/target/lm32/cpu.c b/target/lm32/cpu.c
215
index XXXXXXX..XXXXXXX 100644
216
--- a/target/lm32/cpu.c
217
+++ b/target/lm32/cpu.c
218
@@ -XXX,XX +XXX,XX @@ static void lm32_cpu_class_init(ObjectClass *oc, void *data)
219
cc->set_pc = lm32_cpu_set_pc;
220
cc->gdb_read_register = lm32_cpu_gdb_read_register;
221
cc->gdb_write_register = lm32_cpu_gdb_write_register;
222
- cc->tlb_fill = lm32_cpu_tlb_fill;
223
+ cc->tcg_ops.tlb_fill = lm32_cpu_tlb_fill;
224
#ifndef CONFIG_USER_ONLY
225
cc->get_phys_page_debug = lm32_cpu_get_phys_page_debug;
226
cc->vmsd = &vmstate_lm32_cpu;
227
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
228
index XXXXXXX..XXXXXXX 100644
229
--- a/target/m68k/cpu.c
230
+++ b/target/m68k/cpu.c
231
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
232
cc->set_pc = m68k_cpu_set_pc;
233
cc->gdb_read_register = m68k_cpu_gdb_read_register;
234
cc->gdb_write_register = m68k_cpu_gdb_write_register;
235
- cc->tlb_fill = m68k_cpu_tlb_fill;
236
+ cc->tcg_ops.tlb_fill = m68k_cpu_tlb_fill;
237
#if defined(CONFIG_SOFTMMU)
238
cc->do_transaction_failed = m68k_cpu_transaction_failed;
239
cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
240
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
241
index XXXXXXX..XXXXXXX 100644
242
--- a/target/microblaze/cpu.c
243
+++ b/target/microblaze/cpu.c
244
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
245
cc->tcg_ops.synchronize_from_tb = mb_cpu_synchronize_from_tb;
246
cc->gdb_read_register = mb_cpu_gdb_read_register;
247
cc->gdb_write_register = mb_cpu_gdb_write_register;
248
- cc->tlb_fill = mb_cpu_tlb_fill;
249
+ cc->tcg_ops.tlb_fill = mb_cpu_tlb_fill;
250
#ifndef CONFIG_USER_ONLY
251
cc->do_transaction_failed = mb_cpu_transaction_failed;
252
cc->get_phys_page_attrs_debug = mb_cpu_get_phys_page_attrs_debug;
253
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
254
index XXXXXXX..XXXXXXX 100644
255
--- a/target/mips/cpu.c
256
+++ b/target/mips/cpu.c
257
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
258
cc->tcg_ops.initialize = mips_tcg_init;
259
cc->tcg_ops.cpu_exec_interrupt = mips_cpu_exec_interrupt;
260
cc->tcg_ops.synchronize_from_tb = mips_cpu_synchronize_from_tb;
261
- cc->tlb_fill = mips_cpu_tlb_fill;
262
+ cc->tcg_ops.tlb_fill = mips_cpu_tlb_fill;
263
#endif
264
265
cc->gdb_num_core_regs = 73;
266
diff --git a/target/moxie/cpu.c b/target/moxie/cpu.c
267
index XXXXXXX..XXXXXXX 100644
268
--- a/target/moxie/cpu.c
269
+++ b/target/moxie/cpu.c
270
@@ -XXX,XX +XXX,XX @@ static void moxie_cpu_class_init(ObjectClass *oc, void *data)
271
cc->do_interrupt = moxie_cpu_do_interrupt;
272
cc->dump_state = moxie_cpu_dump_state;
273
cc->set_pc = moxie_cpu_set_pc;
274
- cc->tlb_fill = moxie_cpu_tlb_fill;
275
+ cc->tcg_ops.tlb_fill = moxie_cpu_tlb_fill;
276
#ifndef CONFIG_USER_ONLY
277
cc->get_phys_page_debug = moxie_cpu_get_phys_page_debug;
278
cc->vmsd = &vmstate_moxie_cpu;
279
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
280
index XXXXXXX..XXXXXXX 100644
281
--- a/target/nios2/cpu.c
282
+++ b/target/nios2/cpu.c
283
@@ -XXX,XX +XXX,XX @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)
284
cc->dump_state = nios2_cpu_dump_state;
285
cc->set_pc = nios2_cpu_set_pc;
286
cc->disas_set_info = nios2_cpu_disas_set_info;
287
- cc->tlb_fill = nios2_cpu_tlb_fill;
288
+ cc->tcg_ops.tlb_fill = nios2_cpu_tlb_fill;
289
#ifndef CONFIG_USER_ONLY
290
cc->do_unaligned_access = nios2_cpu_do_unaligned_access;
291
cc->get_phys_page_debug = nios2_cpu_get_phys_page_debug;
292
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
293
index XXXXXXX..XXXXXXX 100644
294
--- a/target/openrisc/cpu.c
295
+++ b/target/openrisc/cpu.c
296
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
297
cc->set_pc = openrisc_cpu_set_pc;
298
cc->gdb_read_register = openrisc_cpu_gdb_read_register;
299
cc->gdb_write_register = openrisc_cpu_gdb_write_register;
300
- cc->tlb_fill = openrisc_cpu_tlb_fill;
301
+ cc->tcg_ops.tlb_fill = openrisc_cpu_tlb_fill;
302
#ifndef CONFIG_USER_ONLY
303
cc->get_phys_page_debug = openrisc_cpu_get_phys_page_debug;
304
dc->vmsd = &vmstate_openrisc_cpu;
305
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
306
index XXXXXXX..XXXXXXX 100644
307
--- a/target/riscv/cpu.c
308
+++ b/target/riscv/cpu.c
309
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
310
cc->gdb_arch_name = riscv_gdb_arch_name;
311
cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
312
cc->tcg_ops.initialize = riscv_translate_init;
313
- cc->tlb_fill = riscv_cpu_tlb_fill;
314
+ cc->tcg_ops.tlb_fill = riscv_cpu_tlb_fill;
315
316
device_class_set_props(dc, riscv_cpu_properties);
317
}
318
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
319
index XXXXXXX..XXXXXXX 100644
320
--- a/target/rx/cpu.c
321
+++ b/target/rx/cpu.c
322
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)
323
cc->get_phys_page_debug = rx_cpu_get_phys_page_debug;
324
cc->disas_set_info = rx_cpu_disas_set_info;
325
cc->tcg_ops.initialize = rx_translate_init;
326
- cc->tlb_fill = rx_cpu_tlb_fill;
327
+ cc->tcg_ops.tlb_fill = rx_cpu_tlb_fill;
328
329
cc->gdb_num_core_regs = 26;
330
cc->gdb_core_xml_file = "rx-core.xml";
331
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
332
index XXXXXXX..XXXXXXX 100644
333
--- a/target/s390x/cpu.c
334
+++ b/target/s390x/cpu.c
335
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
336
cc->disas_set_info = s390_cpu_disas_set_info;
337
#ifdef CONFIG_TCG
338
cc->tcg_ops.initialize = s390x_translate_init;
339
- cc->tlb_fill = s390_cpu_tlb_fill;
340
+ cc->tcg_ops.tlb_fill = s390_cpu_tlb_fill;
341
#endif
342
343
cc->gdb_num_core_regs = S390_NUM_CORE_REGS;
344
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
345
index XXXXXXX..XXXXXXX 100644
346
--- a/target/sh4/cpu.c
347
+++ b/target/sh4/cpu.c
348
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
349
cc->tcg_ops.synchronize_from_tb = superh_cpu_synchronize_from_tb;
350
cc->gdb_read_register = superh_cpu_gdb_read_register;
351
cc->gdb_write_register = superh_cpu_gdb_write_register;
352
- cc->tlb_fill = superh_cpu_tlb_fill;
353
+ cc->tcg_ops.tlb_fill = superh_cpu_tlb_fill;
354
#ifndef CONFIG_USER_ONLY
355
cc->do_unaligned_access = superh_cpu_do_unaligned_access;
356
cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
357
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
358
index XXXXXXX..XXXXXXX 100644
359
--- a/target/sparc/cpu.c
360
+++ b/target/sparc/cpu.c
361
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
362
cc->tcg_ops.synchronize_from_tb = sparc_cpu_synchronize_from_tb;
363
cc->gdb_read_register = sparc_cpu_gdb_read_register;
364
cc->gdb_write_register = sparc_cpu_gdb_write_register;
365
- cc->tlb_fill = sparc_cpu_tlb_fill;
366
+ cc->tcg_ops.tlb_fill = sparc_cpu_tlb_fill;
367
#ifndef CONFIG_USER_ONLY
368
cc->do_transaction_failed = sparc_cpu_do_transaction_failed;
369
cc->do_unaligned_access = sparc_cpu_do_unaligned_access;
370
diff --git a/target/tilegx/cpu.c b/target/tilegx/cpu.c
371
index XXXXXXX..XXXXXXX 100644
372
--- a/target/tilegx/cpu.c
373
+++ b/target/tilegx/cpu.c
374
@@ -XXX,XX +XXX,XX @@ static void tilegx_cpu_class_init(ObjectClass *oc, void *data)
375
cc->tcg_ops.cpu_exec_interrupt = tilegx_cpu_exec_interrupt;
376
cc->dump_state = tilegx_cpu_dump_state;
377
cc->set_pc = tilegx_cpu_set_pc;
378
- cc->tlb_fill = tilegx_cpu_tlb_fill;
379
+ cc->tcg_ops.tlb_fill = tilegx_cpu_tlb_fill;
380
cc->gdb_num_core_regs = 0;
381
cc->tcg_ops.initialize = tilegx_tcg_init;
382
}
383
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
384
index XXXXXXX..XXXXXXX 100644
385
--- a/target/tricore/cpu.c
386
+++ b/target/tricore/cpu.c
387
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)
388
cc->tcg_ops.synchronize_from_tb = tricore_cpu_synchronize_from_tb;
389
cc->get_phys_page_debug = tricore_cpu_get_phys_page_debug;
390
cc->tcg_ops.initialize = tricore_tcg_init;
391
- cc->tlb_fill = tricore_cpu_tlb_fill;
392
+ cc->tcg_ops.tlb_fill = tricore_cpu_tlb_fill;
393
}
394
395
#define DEFINE_TRICORE_CPU_TYPE(cpu_model, initfn) \
396
diff --git a/target/unicore32/cpu.c b/target/unicore32/cpu.c
397
index XXXXXXX..XXXXXXX 100644
398
--- a/target/unicore32/cpu.c
399
+++ b/target/unicore32/cpu.c
400
@@ -XXX,XX +XXX,XX @@ static void uc32_cpu_class_init(ObjectClass *oc, void *data)
401
cc->tcg_ops.cpu_exec_interrupt = uc32_cpu_exec_interrupt;
402
cc->dump_state = uc32_cpu_dump_state;
403
cc->set_pc = uc32_cpu_set_pc;
404
- cc->tlb_fill = uc32_cpu_tlb_fill;
405
+ cc->tcg_ops.tlb_fill = uc32_cpu_tlb_fill;
406
cc->get_phys_page_debug = uc32_cpu_get_phys_page_debug;
407
cc->tcg_ops.initialize = uc32_translate_init;
408
dc->vmsd = &vmstate_uc32_cpu;
409
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
410
index XXXXXXX..XXXXXXX 100644
411
--- a/target/xtensa/cpu.c
412
+++ b/target/xtensa/cpu.c
413
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
414
cc->gdb_read_register = xtensa_cpu_gdb_read_register;
415
cc->gdb_write_register = xtensa_cpu_gdb_write_register;
416
cc->gdb_stop_before_watchpoint = true;
417
- cc->tlb_fill = xtensa_cpu_tlb_fill;
418
+ cc->tcg_ops.tlb_fill = xtensa_cpu_tlb_fill;
419
#ifndef CONFIG_USER_ONLY
420
cc->do_unaligned_access = xtensa_cpu_do_unaligned_access;
421
cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
422
diff --git a/target/ppc/translate_init.c.inc b/target/ppc/translate_init.c.inc
423
index XXXXXXX..XXXXXXX 100644
424
--- a/target/ppc/translate_init.c.inc
425
+++ b/target/ppc/translate_init.c.inc
426
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
427
#ifdef CONFIG_TCG
428
cc->tcg_ops.initialize = ppc_translate_init;
429
cc->tcg_ops.cpu_exec_interrupt = ppc_cpu_exec_interrupt;
430
- cc->tlb_fill = ppc_cpu_tlb_fill;
431
+ cc->tcg_ops.tlb_fill = ppc_cpu_tlb_fill;
432
#ifndef CONFIG_USER_ONLY
433
cc->tcg_ops.cpu_exec_enter = ppc_cpu_exec_enter;
434
cc->tcg_ops.cpu_exec_exit = ppc_cpu_exec_exit;
435
--
41
--
436
2.25.1
42
2.43.0
437
438
diff view generated by jsdifflib
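
The mechanical shape of the change above, reduced to a self-contained
analogy: accelerator-specific hooks move into a nested ops struct inside the
class, so call sites reach them as cc->tcg_ops.hook. Names below are
illustrative, not QEMU's.

#include <stdbool.h>
#include <stdio.h>

typedef struct DemoTcgOps {
    /* TCG-only hook, grouped away from accelerator-neutral fields. */
    bool (*tlb_fill)(void *cpu, unsigned long addr, bool probe);
} DemoTcgOps;

typedef struct DemoCPUClass {
    const char *name;        /* accelerator-neutral state stays here */
    DemoTcgOps tcg_ops;      /* ...while TCG hooks live in a sub-struct */
} DemoCPUClass;

static bool demo_tlb_fill(void *cpu, unsigned long addr, bool probe)
{
    (void)cpu; (void)probe;
    printf("filling TLB for 0x%lx\n", addr);
    return true;
}

int main(void)
{
    DemoCPUClass cc = {
        .name = "demo",
        .tcg_ops.tlb_fill = demo_tlb_fill,
    };
    /* Call site mirrors cc->tcg_ops.tlb_fill(...) in the patch. */
    return cc.tcg_ops.tlb_fill(NULL, 0x1000, false) ? 0 : 1;
}
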
1
Fixes INDEX_op_rotli_vec for aarch64 host, where the 3rd
1
Avoid the use of the OptContext slots.
2
argument is an integer, not a temporary, which now tickles
3
an assert added in e89b28a6350.
4
2
5
Previously, the value computed into v2 would be garbage for
3
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
6
rotli_vec, but as the value was unused it caused no harm.
7
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
5
---
10
tcg/aarch64/tcg-target.c.inc | 7 ++++---
6
tcg/optimize.c | 16 +++++++++-------
11
1 file changed, 4 insertions(+), 3 deletions(-)
7
1 file changed, 9 insertions(+), 7 deletions(-)
12
8
13
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
9
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/aarch64/tcg-target.c.inc
11
--- a/tcg/optimize.c
16
+++ b/tcg/aarch64/tcg-target.c.inc
12
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
13
@@ -XXX,XX +XXX,XX @@ static bool fold_sub2(OptContext *ctx, TCGOp *op)
18
v0 = temp_tcgv_vec(arg_temp(a0));
14
19
v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
15
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
20
a2 = va_arg(va, TCGArg);
16
{
21
- v2 = temp_tcgv_vec(arg_temp(a2));
17
+ uint64_t z_mask = -1, s_mask = 0;
22
+ va_end(va);
18
+
23
19
/* We can't do any folding with a load, but we can record bits. */
24
switch (opc) {
20
switch (op->opc) {
25
case INDEX_op_rotli_vec:
21
CASE_OP_32_64(ld8s):
26
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
22
- ctx->s_mask = MAKE_64BIT_MASK(8, 56);
27
case INDEX_op_shrv_vec:
23
+ s_mask = INT8_MIN;
28
case INDEX_op_sarv_vec:
29
/* Right shifts are negative left shifts for AArch64. */
30
+ v2 = temp_tcgv_vec(arg_temp(a2));
31
t1 = tcg_temp_new_vec(type);
32
tcg_gen_neg_vec(vece, t1, v2);
33
opc = (opc == INDEX_op_shrv_vec
34
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
35
break;
24
break;
36
25
CASE_OP_32_64(ld8u):
37
case INDEX_op_rotlv_vec:
26
- ctx->z_mask = MAKE_64BIT_MASK(0, 8);
38
+ v2 = temp_tcgv_vec(arg_temp(a2));
27
+ z_mask = MAKE_64BIT_MASK(0, 8);
39
t1 = tcg_temp_new_vec(type);
40
c1 = tcg_constant_vec(type, vece, 8 << vece);
41
tcg_gen_sub_vec(vece, t1, v2, c1);
42
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
43
break;
28
break;
44
29
CASE_OP_32_64(ld16s):
45
case INDEX_op_rotrv_vec:
30
- ctx->s_mask = MAKE_64BIT_MASK(16, 48);
46
+ v2 = temp_tcgv_vec(arg_temp(a2));
31
+ s_mask = INT16_MIN;
47
t1 = tcg_temp_new_vec(type);
32
break;
48
t2 = tcg_temp_new_vec(type);
33
CASE_OP_32_64(ld16u):
49
c1 = tcg_constant_vec(type, vece, 8 << vece);
34
- ctx->z_mask = MAKE_64BIT_MASK(0, 16);
50
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
35
+ z_mask = MAKE_64BIT_MASK(0, 16);
36
break;
37
case INDEX_op_ld32s_i64:
38
- ctx->s_mask = MAKE_64BIT_MASK(32, 32);
39
+ s_mask = INT32_MIN;
40
break;
41
case INDEX_op_ld32u_i64:
42
- ctx->z_mask = MAKE_64BIT_MASK(0, 32);
43
+ z_mask = MAKE_64BIT_MASK(0, 32);
44
break;
51
default:
45
default:
52
g_assert_not_reached();
46
g_assert_not_reached();
53
}
47
}
54
-
48
- return false;
55
- va_end(va);
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
56
}
50
}
57
51
58
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
52
static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
59
--
53
--
60
2.25.1
54
2.43.0
61
62
diff view generated by jsdifflib
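
The mask constants recorded for loads above can be double-checked
numerically. In the new representation, s_mask marks the bits guaranteed to
repeat the sign, so INT8_MIN viewed as a uint64_t (0xffffffffffffff80) says
bits 7..63 all equal bit 7. A standalone sketch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* ld8u: only the low byte may be nonzero. */
    uint64_t z_mask_ld8u = 0xffull;            /* MAKE_64BIT_MASK(0, 8) */
    /* ld8s: bits 7..63 repeat the sign bit. */
    uint64_t s_mask_ld8s = (uint64_t)(int64_t)INT8_MIN;

    assert(z_mask_ld8u == 0xff);
    assert(s_mask_ld8s == 0xffffffffffffff80ull);

    /* Any sign-extended byte value is consistent with that mask. */
    int8_t b = -3;
    uint64_t v = (uint64_t)(int64_t)b;
    assert((v & s_mask_ld8s) == s_mask_ld8s || (v & s_mask_ld8s) == 0);
    return 0;
}
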
1
We do not simultaneously support div and div2 -- it's one
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
or the other. TCI is already using div, so remove div2.
3
4
Tested-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
3
---
8
tcg/tci.c | 12 ------------
4
tcg/optimize.c | 2 +-
9
tcg/tci/tcg-target.c.inc | 8 --------
5
1 file changed, 1 insertion(+), 1 deletion(-)
10
2 files changed, 20 deletions(-)
11
6
12
diff --git a/tcg/tci.c b/tcg/tci.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
13
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/tci.c
9
--- a/tcg/optimize.c
15
+++ b/tcg/tci.c
10
+++ b/tcg/optimize.c
16
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
11
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
17
t2 = tci_read_ri32(regs, &tb_ptr);
12
TCGType type;
18
tci_write_reg(regs, t0, t1 * t2);
13
19
break;
14
if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
20
-#if TCG_TARGET_HAS_div_i32
15
- return false;
21
case INDEX_op_div_i32:
16
+ return finish_folding(ctx, op);
22
t0 = *tb_ptr++;
17
}
23
t1 = tci_read_ri32(regs, &tb_ptr);
18
24
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
19
type = ctx->type;
25
t2 = tci_read_ri32(regs, &tb_ptr);
26
tci_write_reg(regs, t0, t1 % t2);
27
break;
28
-#elif TCG_TARGET_HAS_div2_i32
29
- case INDEX_op_div2_i32:
30
- case INDEX_op_divu2_i32:
31
- TODO();
32
- break;
33
-#endif
34
case INDEX_op_and_i32:
35
t0 = *tb_ptr++;
36
t1 = tci_read_ri32(regs, &tb_ptr);
37
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
38
case INDEX_op_remu_i64:
39
TODO();
40
break;
41
-#elif TCG_TARGET_HAS_div2_i64
42
- case INDEX_op_div2_i64:
43
- case INDEX_op_divu2_i64:
44
- TODO();
45
- break;
46
#endif
47
case INDEX_op_and_i64:
48
t0 = *tb_ptr++;
49
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
50
index XXXXXXX..XXXXXXX 100644
51
--- a/tcg/tci/tcg-target.c.inc
52
+++ b/tcg/tci/tcg-target.c.inc
53
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
54
case INDEX_op_remu_i64: /* Optional (TCG_TARGET_HAS_div_i64). */
55
TODO();
56
break;
57
- case INDEX_op_div2_i64: /* Optional (TCG_TARGET_HAS_div2_i64). */
58
- case INDEX_op_divu2_i64: /* Optional (TCG_TARGET_HAS_div2_i64). */
59
- TODO();
60
- break;
61
case INDEX_op_brcond_i64:
62
tcg_out_r(s, args[0]);
63
tcg_out_ri64(s, const_args[1], args[1]);
64
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
65
tcg_out_ri32(s, const_args[1], args[1]);
66
tcg_out_ri32(s, const_args[2], args[2]);
67
break;
68
- case INDEX_op_div2_i32: /* Optional (TCG_TARGET_HAS_div2_i32). */
69
- case INDEX_op_divu2_i32: /* Optional (TCG_TARGET_HAS_div2_i32). */
70
- TODO();
71
- break;
72
#if TCG_TARGET_REG_BITS == 32
73
case INDEX_op_add2_i32:
74
case INDEX_op_sub2_i32:
75
--
20
--
76
2.25.1
21
2.43.0
77
78
diff view generated by jsdifflib
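
For reference, the interpreter's four division flavours (implemented earlier
in this series) come down to plain C casts on the same 64-bit register
values; a standalone sketch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t t1 = (uint64_t)-7, t2 = 2;

    assert((int64_t)t1 / (int64_t)t2 == -3);    /* div_i64: trunc toward 0 */
    assert((int64_t)t1 % (int64_t)t2 == -1);    /* rem_i64 */
    assert(t1 / t2 == 0x7ffffffffffffffcull);   /* divu_i64 */
    assert(t1 % t2 == 1);                       /* remu_i64 */
    return 0;
}
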
1
From: Claudio Fontana <cfontana@suse.de>
1
Avoid the use of the OptContext slots. Find TempOptInfo once.
2
Remove fold_masks as the function becomes unused.
2
3
3
commit 40612000599e ("arm: Correctly handle watchpoints for BE32 CPUs")
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
5
introduced this ARM-specific, TCG-specific hack to adjust the address,
6
before checking it with cpu_check_watchpoint.
7
8
Make adjust_watchpoint_address optional and move it to tcg_ops.
9
10
Signed-off-by: Claudio Fontana <cfontana@suse.de>
11
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
12
Message-Id: <20210204163931.7358-14-cfontana@suse.de>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
6
---
15
include/hw/core/cpu.h | 6 +++++-
7
tcg/optimize.c | 18 ++++++++----------
16
hw/core/cpu.c | 6 ------
8
1 file changed, 8 insertions(+), 10 deletions(-)
17
softmmu/physmem.c | 5 ++++-
18
target/arm/cpu.c | 2 +-
19
4 files changed, 10 insertions(+), 9 deletions(-)
20
9
21
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
22
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
23
--- a/include/hw/core/cpu.h
12
--- a/tcg/optimize.c
24
+++ b/include/hw/core/cpu.h
13
+++ b/tcg/optimize.c
25
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
14
@@ -XXX,XX +XXX,XX @@ static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
26
void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
15
return fold_masks_zs(ctx, op, -1, s_mask);
27
MMUAccessType access_type,
28
int mmu_idx, uintptr_t retaddr);
29
+ /**
30
+ * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM
31
+ */
32
+ vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
33
+
34
} TcgCpuOperations;
35
36
/**
37
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
38
const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
39
40
void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
41
- vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
42
43
const char *deprecation_note;
44
/* Keep non-pointer data at the end to minimize holes. */
45
diff --git a/hw/core/cpu.c b/hw/core/cpu.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/hw/core/cpu.c
48
+++ b/hw/core/cpu.c
49
@@ -XXX,XX +XXX,XX @@ static int64_t cpu_common_get_arch_id(CPUState *cpu)
50
return cpu->cpu_index;
51
}
16
}
52
17
53
-static vaddr cpu_adjust_watchpoint_address(CPUState *cpu, vaddr addr, int len)
18
-static bool fold_masks(OptContext *ctx, TCGOp *op)
54
-{
19
-{
55
- return addr;
20
- return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
56
-}
21
-}
57
-
22
-
58
static Property cpu_common_props[] = {
23
/*
59
#ifndef CONFIG_USER_ONLY
24
* An "affected" mask bit is 0 if and only if the result is identical
60
/* Create a memory property for softmmu CPU object,
25
* to the first input. Thus if the entire mask is 0, the operation
61
@@ -XXX,XX +XXX,XX @@ static void cpu_class_init(ObjectClass *klass, void *data)
26
@@ -XXX,XX +XXX,XX @@ static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
62
k->gdb_write_register = cpu_common_gdb_write_register;
27
63
k->virtio_is_big_endian = cpu_common_virtio_is_big_endian;
28
static bool fold_xor(OptContext *ctx, TCGOp *op)
64
k->debug_check_watchpoint = cpu_common_debug_check_watchpoint;
29
{
65
- k->adjust_watchpoint_address = cpu_adjust_watchpoint_address;
30
+ uint64_t z_mask, s_mask;
66
set_bit(DEVICE_CATEGORY_CPU, dc->categories);
31
+ TempOptInfo *t1, *t2;
67
dc->realize = cpu_common_realizefn;
32
+
68
dc->unrealize = cpu_common_unrealizefn;
33
if (fold_const2_commutative(ctx, op) ||
69
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
34
fold_xx_to_i(ctx, op, 0) ||
70
index XXXXXXX..XXXXXXX 100644
35
fold_xi_to_x(ctx, op, 0) ||
71
--- a/softmmu/physmem.c
36
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
72
+++ b/softmmu/physmem.c
37
return true;
73
@@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
74
return;
75
}
38
}
76
39
77
- addr = cc->adjust_watchpoint_address(cpu, addr, len);
40
- ctx->z_mask = arg_info(op->args[1])->z_mask
78
+ if (cc->tcg_ops.adjust_watchpoint_address) {
41
- | arg_info(op->args[2])->z_mask;
79
+ /* this is currently used only by ARM BE32 */
42
- ctx->s_mask = arg_info(op->args[1])->s_mask
80
+ addr = cc->tcg_ops.adjust_watchpoint_address(cpu, addr, len);
43
- & arg_info(op->args[2])->s_mask;
81
+ }
44
- return fold_masks(ctx, op);
82
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
45
+ t1 = arg_info(op->args[1]);
83
if (watchpoint_address_matches(wp, addr, len)
46
+ t2 = arg_info(op->args[2]);
84
&& (wp->flags & flags)) {
47
+ z_mask = t1->z_mask | t2->z_mask;
85
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
48
+ s_mask = t1->s_mask & t2->s_mask;
86
index XXXXXXX..XXXXXXX 100644
49
+ return fold_masks_zs(ctx, op, z_mask, s_mask);
87
--- a/target/arm/cpu.c
50
}
88
+++ b/target/arm/cpu.c
51
89
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
52
static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
90
#if !defined(CONFIG_USER_ONLY)
91
cc->tcg_ops.do_transaction_failed = arm_cpu_do_transaction_failed;
92
cc->tcg_ops.do_unaligned_access = arm_cpu_do_unaligned_access;
93
- cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
94
+ cc->tcg_ops.adjust_watchpoint_address = arm_adjust_watchpoint_address;
95
cc->tcg_ops.do_interrupt = arm_cpu_do_interrupt;
96
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
97
#endif /* CONFIG_TCG */
98
--
53
--
99
2.25.1
54
2.43.0
100
101
diff view generated by jsdifflib
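
The xor mask rules above are easy to convince oneself of: a result bit can
be nonzero only where some input bit can be (so the z_masks OR together),
and a bit is a guaranteed sign repetition only when it is one in both inputs
(so the s_masks AND together). A brute-force check of the z rule, as a
standalone sketch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint64_t zx = 0x0f, zy = 0xff;  /* per-input known-bits masks */

    for (uint64_t x = 0; x <= zx; x++) {
        for (uint64_t y = 0; y <= zy; y++) {
            /* No bit outside zx | zy ever appears in x ^ y. */
            assert(((x ^ y) & ~(zx | zy)) == 0);
        }
    }
    return 0;
}
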
1
Tested-by: Alex Bennée <alex.bennee@linaro.org>
1
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
2
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
3
---
5
tcg/tci.c | 10 +---------
4
tcg/optimize.c | 2 +-
6
1 file changed, 1 insertion(+), 9 deletions(-)
5
1 file changed, 1 insertion(+), 1 deletion(-)
7
6
8
diff --git a/tcg/tci.c b/tcg/tci.c
7
diff --git a/tcg/optimize.c b/tcg/optimize.c
9
index XXXXXXX..XXXXXXX 100644
8
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/tci.c
9
--- a/tcg/optimize.c
11
+++ b/tcg/tci.c
10
+++ b/tcg/optimize.c
12
@@ -XXX,XX +XXX,XX @@ tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
11
@@ -XXX,XX +XXX,XX @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
13
regs[index] = value;
12
return fold_orc(ctx, op);
13
}
14
}
15
- return false;
16
+ return finish_folding(ctx, op);
14
}
17
}
15
18
16
-#if TCG_TARGET_REG_BITS == 64
19
/* Propagate constants and copies, fold constant expressions. */
17
-static void
18
-tci_write_reg32s(tcg_target_ulong *regs, TCGReg index, int32_t value)
19
-{
20
- tci_write_reg(regs, index, value);
21
-}
22
-#endif
23
-
24
static void tci_write_reg8(tcg_target_ulong *regs, TCGReg index, uint8_t value)
25
{
26
tci_write_reg(regs, index, value);
27
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
28
t0 = *tb_ptr++;
29
t1 = tci_read_r(regs, &tb_ptr);
30
t2 = tci_read_s32(&tb_ptr);
31
- tci_write_reg32s(regs, t0, *(int32_t *)(t1 + t2));
32
+ tci_write_reg(regs, t0, *(int32_t *)(t1 + t2));
33
break;
34
case INDEX_op_ld_i64:
35
t0 = *tb_ptr++;
36
--
20
--
37
2.25.1
21
2.43.0
38
39
diff view generated by jsdifflib
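
Why the sign-extending wrapper removed above was redundant: reading through
an int32_t pointer already yields a sign-extended value once it is converted
to the 64-bit register type, so the plain tci_write_reg() suffices. A
standalone illustration:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    int32_t mem = -5;          /* as if loaded via *(int32_t *)(t1 + t2) */
    uint64_t reg = mem;        /* usual conversions sign-extend to 64 bit */

    assert(reg == 0xfffffffffffffffbull);
    return 0;
}
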
1
From: Claudio Fontana <cfontana@suse.de>
1
All non-default cases now finish folding within each function.
2
Do the same with the default case and assert it is done after.
2
3
3
Signed-off-by: Claudio Fontana <cfontana@suse.de>
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
8
[claudio: wrap target code around CONFIG_TCG and !CONFIG_USER_ONLY]
9
10
avoiding its use in headers used by common_ss code (should be poisoned).
11
12
Note: need to be careful with the use of CONFIG_USER_ONLY,
13
Message-Id: <20210204163931.7358-11-cfontana@suse.de>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
---
6
---
16
include/hw/core/cpu.h | 28 +++++++++++++---------------
7
tcg/optimize.c | 6 ++----
17
hw/mips/jazz.c | 9 +++++++--
8
1 file changed, 2 insertions(+), 4 deletions(-)
18
target/alpha/cpu.c | 2 +-
19
target/arm/cpu.c | 4 ++--
20
target/m68k/cpu.c | 2 +-
21
target/microblaze/cpu.c | 2 +-
22
target/mips/cpu.c | 4 +++-
23
target/riscv/cpu.c | 2 +-
24
target/riscv/cpu_helper.c | 2 +-
25
target/sparc/cpu.c | 2 +-
26
target/xtensa/cpu.c | 2 +-
27
target/xtensa/helper.c | 4 ++--
28
12 files changed, 34 insertions(+), 29 deletions(-)
29
9
30
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
31
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
32
--- a/include/hw/core/cpu.h
12
--- a/tcg/optimize.c
33
+++ b/include/hw/core/cpu.h
13
+++ b/tcg/optimize.c
34
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
14
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
35
/** @debug_excp_handler: Callback for handling debug exceptions */
15
done = true;
36
void (*debug_excp_handler)(CPUState *cpu);
16
break;
37
17
default:
38
+ /**
18
+ done = finish_folding(&ctx, op);
39
+ * @do_transaction_failed: Callback for handling failed memory transactions
19
break;
40
+ * (ie bus faults or external aborts; not MMU faults)
20
}
41
+ */
42
+ void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
43
+ unsigned size, MMUAccessType access_type,
44
+ int mmu_idx, MemTxAttrs attrs,
45
+ MemTxResult response, uintptr_t retaddr);
46
} TcgCpuOperations;
47
48
/**
49
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
50
* @has_work: Callback for checking if there is work to do.
51
* @do_unaligned_access: Callback for unaligned access handling, if
52
* the target defines #TARGET_ALIGNED_ONLY.
53
- * @do_transaction_failed: Callback for handling failed memory transactions
54
- * (ie bus faults or external aborts; not MMU faults)
55
* @virtio_is_big_endian: Callback to return %true if a CPU which supports
56
* runtime configurable endianness is currently big-endian. Non-configurable
57
* CPUs can use the default implementation of this method. This method should
58
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
59
void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
60
MMUAccessType access_type,
61
int mmu_idx, uintptr_t retaddr);
62
- void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
63
- unsigned size, MMUAccessType access_type,
64
- int mmu_idx, MemTxAttrs attrs,
65
- MemTxResult response, uintptr_t retaddr);
66
bool (*virtio_is_big_endian)(CPUState *cpu);
67
int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
68
uint8_t *buf, int len, bool is_write);
69
@@ -XXX,XX +XXX,XX @@ CPUState *cpu_by_arch_id(int64_t id);
70
71
void cpu_interrupt(CPUState *cpu, int mask);
72
73
-#ifdef NEED_CPU_H
74
-
21
-
75
-#ifdef CONFIG_SOFTMMU
22
- if (!done) {
76
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
23
- finish_folding(&ctx, op);
77
MMUAccessType access_type,
24
- }
78
int mmu_idx, uintptr_t retaddr)
25
+ tcg_debug_assert(done);
79
@@ -XXX,XX +XXX,XX @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
80
{
81
CPUClass *cc = CPU_GET_CLASS(cpu);
82
83
- if (!cpu->ignore_memory_transaction_failures && cc->do_transaction_failed) {
84
- cc->do_transaction_failed(cpu, physaddr, addr, size, access_type,
85
- mmu_idx, attrs, response, retaddr);
86
+ if (!cpu->ignore_memory_transaction_failures &&
87
+ cc->tcg_ops.do_transaction_failed) {
88
+ cc->tcg_ops.do_transaction_failed(cpu, physaddr, addr, size,
89
+ access_type, mmu_idx, attrs,
90
+ response, retaddr);
91
}
26
}
92
}
27
}
93
-#endif
94
-
95
-#endif /* NEED_CPU_H */
96
97
/**
98
* cpu_set_pc:
99
diff --git a/hw/mips/jazz.c b/hw/mips/jazz.c
100
index XXXXXXX..XXXXXXX 100644
101
--- a/hw/mips/jazz.c
102
+++ b/hw/mips/jazz.c
103
@@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps dma_dummy_ops = {
104
#define MAGNUM_BIOS_SIZE_MAX 0x7e000
105
#define MAGNUM_BIOS_SIZE \
106
(BIOS_SIZE < MAGNUM_BIOS_SIZE_MAX ? BIOS_SIZE : MAGNUM_BIOS_SIZE_MAX)
107
+
108
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
109
static void (*real_do_transaction_failed)(CPUState *cpu, hwaddr physaddr,
110
vaddr addr, unsigned size,
111
MMUAccessType access_type,
112
@@ -XXX,XX +XXX,XX @@ static void mips_jazz_do_transaction_failed(CPUState *cs, hwaddr physaddr,
113
(*real_do_transaction_failed)(cs, physaddr, addr, size, access_type,
114
mmu_idx, attrs, response, retaddr);
115
}
116
+#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
117
118
static void mips_jazz_init(MachineState *machine,
119
enum jazz_model_e jazz_model)
120
@@ -XXX,XX +XXX,XX @@ static void mips_jazz_init(MachineState *machine,
121
* memory region that catches all memory accesses, as we do on Malta.
122
*/
123
cc = CPU_GET_CLASS(cpu);
124
- real_do_transaction_failed = cc->do_transaction_failed;
125
- cc->do_transaction_failed = mips_jazz_do_transaction_failed;
126
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
127
+ real_do_transaction_failed = cc->tcg_ops.do_transaction_failed;
128
+ cc->tcg_ops.do_transaction_failed = mips_jazz_do_transaction_failed;
129
+#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
130
131
/* allocate RAM */
132
memory_region_add_subregion(address_space, 0, machine->ram);
133
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
134
index XXXXXXX..XXXXXXX 100644
135
--- a/target/alpha/cpu.c
136
+++ b/target/alpha/cpu.c
137
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
138
cc->gdb_write_register = alpha_cpu_gdb_write_register;
139
cc->tcg_ops.tlb_fill = alpha_cpu_tlb_fill;
140
#ifndef CONFIG_USER_ONLY
141
- cc->do_transaction_failed = alpha_cpu_do_transaction_failed;
142
+ cc->tcg_ops.do_transaction_failed = alpha_cpu_do_transaction_failed;
143
cc->do_unaligned_access = alpha_cpu_do_unaligned_access;
144
cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug;
145
dc->vmsd = &vmstate_alpha_cpu;
146
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
147
index XXXXXXX..XXXXXXX 100644
148
--- a/target/arm/cpu.c
149
+++ b/target/arm/cpu.c
150
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
151
cc->debug_check_watchpoint = arm_debug_check_watchpoint;
152
cc->do_unaligned_access = arm_cpu_do_unaligned_access;
153
#if !defined(CONFIG_USER_ONLY)
154
- cc->do_transaction_failed = arm_cpu_do_transaction_failed;
155
+ cc->tcg_ops.do_transaction_failed = arm_cpu_do_transaction_failed;
156
cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
157
cc->tcg_ops.do_interrupt = arm_cpu_do_interrupt;
158
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
159
-#endif
160
+#endif /* CONFIG_TCG */
161
}
162
163
#ifdef CONFIG_KVM
164
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
165
index XXXXXXX..XXXXXXX 100644
166
--- a/target/m68k/cpu.c
167
+++ b/target/m68k/cpu.c
168
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
169
cc->gdb_write_register = m68k_cpu_gdb_write_register;
170
cc->tcg_ops.tlb_fill = m68k_cpu_tlb_fill;
171
#if defined(CONFIG_SOFTMMU)
172
- cc->do_transaction_failed = m68k_cpu_transaction_failed;
173
+ cc->tcg_ops.do_transaction_failed = m68k_cpu_transaction_failed;
174
cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
175
dc->vmsd = &vmstate_m68k_cpu;
176
#endif
177
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
178
index XXXXXXX..XXXXXXX 100644
179
--- a/target/microblaze/cpu.c
180
+++ b/target/microblaze/cpu.c
181
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
182
cc->gdb_write_register = mb_cpu_gdb_write_register;
183
cc->tcg_ops.tlb_fill = mb_cpu_tlb_fill;
184
#ifndef CONFIG_USER_ONLY
185
- cc->do_transaction_failed = mb_cpu_transaction_failed;
186
+ cc->tcg_ops.do_transaction_failed = mb_cpu_transaction_failed;
187
cc->get_phys_page_attrs_debug = mb_cpu_get_phys_page_attrs_debug;
188
dc->vmsd = &vmstate_mb_cpu;
189
#endif
190
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
191
index XXXXXXX..XXXXXXX 100644
192
--- a/target/mips/cpu.c
193
+++ b/target/mips/cpu.c
194
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
195
cc->gdb_read_register = mips_cpu_gdb_read_register;
196
cc->gdb_write_register = mips_cpu_gdb_write_register;
197
#ifndef CONFIG_USER_ONLY
198
- cc->do_transaction_failed = mips_cpu_do_transaction_failed;
199
cc->do_unaligned_access = mips_cpu_do_unaligned_access;
200
cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
201
cc->vmsd = &vmstate_mips_cpu;
202
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
203
cc->tcg_ops.cpu_exec_interrupt = mips_cpu_exec_interrupt;
204
cc->tcg_ops.synchronize_from_tb = mips_cpu_synchronize_from_tb;
205
cc->tcg_ops.tlb_fill = mips_cpu_tlb_fill;
206
+#ifndef CONFIG_USER_ONLY
207
+ cc->tcg_ops.do_transaction_failed = mips_cpu_do_transaction_failed;
208
+#endif /* CONFIG_USER_ONLY */
209
#endif /* CONFIG_TCG */
210
211
cc->gdb_num_core_regs = 73;
212
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
213
index XXXXXXX..XXXXXXX 100644
214
--- a/target/riscv/cpu.c
215
+++ b/target/riscv/cpu.c
216
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
217
cc->gdb_stop_before_watchpoint = true;
218
cc->disas_set_info = riscv_cpu_disas_set_info;
219
#ifndef CONFIG_USER_ONLY
220
- cc->do_transaction_failed = riscv_cpu_do_transaction_failed;
221
+ cc->tcg_ops.do_transaction_failed = riscv_cpu_do_transaction_failed;
222
cc->do_unaligned_access = riscv_cpu_do_unaligned_access;
223
cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
224
/* For now, mark unmigratable: */
225
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
226
index XXXXXXX..XXXXXXX 100644
227
--- a/target/riscv/cpu_helper.c
228
+++ b/target/riscv/cpu_helper.c
229
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
230
env->badaddr = addr;
231
riscv_raise_exception(env, cs->exception_index, retaddr);
232
}
233
-#endif
234
+#endif /* !CONFIG_USER_ONLY */
235
236
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
237
MMUAccessType access_type, int mmu_idx,
238
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
239
index XXXXXXX..XXXXXXX 100644
240
--- a/target/sparc/cpu.c
241
+++ b/target/sparc/cpu.c
242
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
243
cc->gdb_write_register = sparc_cpu_gdb_write_register;
244
cc->tcg_ops.tlb_fill = sparc_cpu_tlb_fill;
245
#ifndef CONFIG_USER_ONLY
246
- cc->do_transaction_failed = sparc_cpu_do_transaction_failed;
247
+ cc->tcg_ops.do_transaction_failed = sparc_cpu_do_transaction_failed;
248
cc->do_unaligned_access = sparc_cpu_do_unaligned_access;
249
cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
250
cc->vmsd = &vmstate_sparc_cpu;
251
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
252
index XXXXXXX..XXXXXXX 100644
253
--- a/target/xtensa/cpu.c
254
+++ b/target/xtensa/cpu.c
255
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
256
#ifndef CONFIG_USER_ONLY
257
cc->do_unaligned_access = xtensa_cpu_do_unaligned_access;
258
cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
259
- cc->do_transaction_failed = xtensa_cpu_do_transaction_failed;
260
+ cc->tcg_ops.do_transaction_failed = xtensa_cpu_do_transaction_failed;
261
#endif
262
cc->tcg_ops.debug_excp_handler = xtensa_breakpoint_handler;
263
cc->disas_set_info = xtensa_cpu_disas_set_info;
264
diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c
265
index XXXXXXX..XXXXXXX 100644
266
--- a/target/xtensa/helper.c
267
+++ b/target/xtensa/helper.c
268
@@ -XXX,XX +XXX,XX @@ bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
269
cpu_loop_exit_restore(cs, retaddr);
270
}
271
272
-#else
273
+#else /* !CONFIG_USER_ONLY */
274
275
void xtensa_cpu_do_unaligned_access(CPUState *cs,
276
vaddr addr, MMUAccessType access_type,
277
@@ -XXX,XX +XXX,XX @@ void xtensa_runstall(CPUXtensaState *env, bool runstall)
278
qemu_cpu_kick(cpu);
279
}
280
}
281
-#endif
282
+#endif /* !CONFIG_USER_ONLY */
283
--
28
--
284
2.25.1
29
2.43.0
285
286
diff view generated by jsdifflib
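
The guarded-call pattern used for the moved hook above (invoke it only if
the class installed one, rather than installing a no-op default) in a
self-contained form; names are illustrative, not QEMU's.

#include <stdio.h>

typedef struct DemoOps {
    void (*transaction_failed)(unsigned long physaddr, int size);
} DemoOps;

static void demo_transaction_failed(unsigned long physaddr, int size)
{
    printf("bus fault at 0x%lx, size %d\n", physaddr, size);
}

int main(void)
{
    DemoOps ops = { 0 };

    if (ops.transaction_failed) {        /* hook absent: silently skip */
        ops.transaction_failed(0x1000, 4);
    }

    ops.transaction_failed = demo_transaction_failed;
    if (ops.transaction_failed) {        /* hook present: report fault */
        ops.transaction_failed(0x1000, 4);
    }
    return 0;
}
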
1
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             t2 = tci_read_s32(&tb_ptr);
             *(uint8_t *)(t1 + t2) = t0;
             break;
-        case INDEX_op_st16_i32:
+        CASE_32_64(st16)
             t0 = tci_read_r16(regs, &tb_ptr);
             t1 = tci_read_r(regs, &tb_ptr);
             t2 = tci_read_s32(&tb_ptr);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             t2 = tci_read_s32(&tb_ptr);
             tci_write_reg(regs, t0, *(uint64_t *)(t1 + t2));
             break;
-        case INDEX_op_st16_i64:
-            t0 = tci_read_r16(regs, &tb_ptr);
-            t1 = tci_read_r(regs, &tb_ptr);
-            t2 = tci_read_s32(&tb_ptr);
-            *(uint16_t *)(t1 + t2) = t0;
-            break;
         case INDEX_op_st32_i64:
             t0 = tci_read_r32(regs, &tb_ptr);
             t1 = tci_read_r(regs, &tb_ptr);
--
2.25.1

All mask setting is now done with parameters via fold_masks_*.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 13 -------------
 1 file changed, 13 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ typedef struct OptContext {
     QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

     /* In flight values from optimization. */
-    uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
-    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
     TCGType type;
 } OptContext;

@@ -XXX,XX +XXX,XX @@ static bool finish_folding(OptContext *ctx, TCGOp *op)
     for (i = 0; i < nb_oargs; i++) {
         TCGTemp *ts = arg_temp(op->args[i]);
         reset_ts(ctx, ts);
-        /*
-         * Save the corresponding known-zero/sign bits mask for the
-         * first output argument (only one supported so far).
-         */
-        if (i == 0) {
-            ts_info(ts)->z_mask = ctx->z_mask;
-        }
     }
     return true;
 }
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
         ctx.type = TCG_TYPE_I32;
     }

-    /* Assume all bits affected, no bits known zero, no sign reps. */
-    ctx.z_mask = -1;
-    ctx.s_mask = 0;
-
     /*
      * Process each opcode.
      * Sorted alphabetically by opcode as much as possible.
--
2.43.0
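The tci.c hunk above folds the 32-bit and 64-bit opcode arms into one via a CASE_32_64() macro. The macro itself is defined elsewhere in tci.c; a hedged sketch of its likely shape (the exact definition may differ):

    /* Sketch: one switch arm for both the _i32 and _i64 opcode variants. */
    #define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32): \
        case glue(glue(INDEX_op_, x), _i64):

With that, CASE_32_64(st16) expands to the two case labels INDEX_op_st16_i32 and INDEX_op_st16_i64, which is exactly what the removed duplicate arm used to provide.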
The configure option was backward, and we failed to
pass the value on to meson.

Fixes: 23a77b2d18b ("build-system: clean up TCG/TCI configury")
Tested-by: Stefan Weil <sw@weilnetz.de>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 configure | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/configure b/configure
index XXXXXXX..XXXXXXX 100755
--- a/configure
+++ b/configure
@@ -XXX,XX +XXX,XX @@ for opt do
   ;;
   --enable-whpx) whpx="enabled"
   ;;
-  --disable-tcg-interpreter) tcg_interpreter="true"
+  --disable-tcg-interpreter) tcg_interpreter="false"
   ;;
-  --enable-tcg-interpreter) tcg_interpreter="false"
+  --enable-tcg-interpreter) tcg_interpreter="true"
   ;;
   --disable-cap-ng) cap_ng="disabled"
   ;;
@@ -XXX,XX +XXX,XX @@ NINJA=$ninja $meson setup \
         -Dvhost_user_blk_server=$vhost_user_blk_server \
         -Dfuse=$fuse -Dfuse_lseek=$fuse_lseek -Dguest_agent_msi=$guest_agent_msi \
         $(if test "$default_features" = no; then echo "-Dauto_features=disabled"; fi) \
+        -Dtcg_interpreter=$tcg_interpreter \
         $cross_arg \
         "$PWD" "$source_path"

--
2.25.1

All instances of s_mask have been converted to the new
representation. We can now re-enable usage.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_exts(OptContext *ctx, TCGOp *op)
         g_assert_not_reached();
     }

-    if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }

@@ -XXX,XX +XXX,XX @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
     s_mask = s_mask_old >> pos;
     s_mask |= -1ull << (len - 1);

-    if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
+    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
         return true;
     }
--
2.43.0
1
The big comment just above says functions should be sorted.
2
Add forward declarations as needed.
2
3
3
cpu_check_watchpoint, watchpoint_address_matches are TCG-only.
4
Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
4
5
Signed-off-by: Claudio Fontana <cfontana@suse.de>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Message-Id: <20210204163931.7358-13-cfontana@suse.de>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
6
---
10
softmmu/physmem.c | 141 +++++++++++++++++++++++-----------------------
7
tcg/optimize.c | 114 +++++++++++++++++++++++++------------------------
11
1 file changed, 72 insertions(+), 69 deletions(-)
8
1 file changed, 59 insertions(+), 55 deletions(-)
12
9
13
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
10
diff --git a/tcg/optimize.c b/tcg/optimize.c
14
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
15
--- a/softmmu/physmem.c
12
--- a/tcg/optimize.c
16
+++ b/softmmu/physmem.c
13
+++ b/tcg/optimize.c
17
@@ -XXX,XX +XXX,XX @@ void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
14
@@ -XXX,XX +XXX,XX @@ static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
18
}
15
* 3) those that produce information about the result value.
16
*/
17
18
+static bool fold_or(OptContext *ctx, TCGOp *op);
19
+static bool fold_orc(OptContext *ctx, TCGOp *op);
20
+static bool fold_xor(OptContext *ctx, TCGOp *op);
21
+
22
static bool fold_add(OptContext *ctx, TCGOp *op)
23
{
24
if (fold_const2_commutative(ctx, op) ||
25
@@ -XXX,XX +XXX,XX @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
26
return fold_masks_zs(ctx, op, z_mask, s_mask);
19
}
27
}
20
28
21
+#ifdef CONFIG_TCG
29
+static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
22
/* Return true if this watchpoint address matches the specified
23
* access (ie the address range covered by the watchpoint overlaps
24
* partially or completely with the address range covered by the
25
@@ -XXX,XX +XXX,XX @@ int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
26
return ret;
27
}
28
29
+/* Generate a debug exception if a watchpoint has been hit. */
30
+void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
31
+ MemTxAttrs attrs, int flags, uintptr_t ra)
32
+{
30
+{
33
+ CPUClass *cc = CPU_GET_CLASS(cpu);
31
+ /* If true and false values are the same, eliminate the cmp. */
34
+ CPUWatchpoint *wp;
32
+ if (args_are_copies(op->args[2], op->args[3])) {
35
+
33
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
36
+ assert(tcg_enabled());
37
+ if (cpu->watchpoint_hit) {
38
+ /*
39
+ * We re-entered the check after replacing the TB.
40
+ * Now raise the debug interrupt so that it will
41
+ * trigger after the current instruction.
42
+ */
43
+ qemu_mutex_lock_iothread();
44
+ cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
45
+ qemu_mutex_unlock_iothread();
46
+ return;
47
+ }
34
+ }
48
+
35
+
49
+ addr = cc->adjust_watchpoint_address(cpu, addr, len);
36
+ if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
50
+ QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
37
+ uint64_t tv = arg_info(op->args[2])->val;
51
+ if (watchpoint_address_matches(wp, addr, len)
38
+ uint64_t fv = arg_info(op->args[3])->val;
52
+ && (wp->flags & flags)) {
39
+
53
+ if (replay_running_debug()) {
40
+ if (tv == -1 && fv == 0) {
54
+ /*
41
+ return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
55
+ * Don't process the watchpoints when we are
42
+ }
56
+ * in a reverse debugging operation.
43
+ if (tv == 0 && fv == -1) {
57
+ */
44
+ if (TCG_TARGET_HAS_not_vec) {
58
+ replay_breakpoint();
45
+ op->opc = INDEX_op_not_vec;
59
+ return;
46
+ return fold_not(ctx, op);
47
+ } else {
48
+ op->opc = INDEX_op_xor_vec;
49
+ op->args[2] = arg_new_constant(ctx, -1);
50
+ return fold_xor(ctx, op);
60
+ }
51
+ }
61
+ if (flags == BP_MEM_READ) {
62
+ wp->flags |= BP_WATCHPOINT_HIT_READ;
63
+ } else {
64
+ wp->flags |= BP_WATCHPOINT_HIT_WRITE;
65
+ }
66
+ wp->hitaddr = MAX(addr, wp->vaddr);
67
+ wp->hitattrs = attrs;
68
+ if (!cpu->watchpoint_hit) {
69
+ if (wp->flags & BP_CPU &&
70
+ !cc->debug_check_watchpoint(cpu, wp)) {
71
+ wp->flags &= ~BP_WATCHPOINT_HIT;
72
+ continue;
73
+ }
74
+ cpu->watchpoint_hit = wp;
75
+
76
+ mmap_lock();
77
+ tb_check_watchpoint(cpu, ra);
78
+ if (wp->flags & BP_STOP_BEFORE_ACCESS) {
79
+ cpu->exception_index = EXCP_DEBUG;
80
+ mmap_unlock();
81
+ cpu_loop_exit_restore(cpu, ra);
82
+ } else {
83
+ /* Force execution of one insn next time. */
84
+ cpu->cflags_next_tb = 1 | curr_cflags();
85
+ mmap_unlock();
86
+ if (ra) {
87
+ cpu_restore_state(cpu, ra, true);
88
+ }
89
+ cpu_loop_exit_noexc(cpu);
90
+ }
91
+ }
92
+ } else {
93
+ wp->flags &= ~BP_WATCHPOINT_HIT;
94
+ }
52
+ }
95
+ }
53
+ }
54
+ if (arg_is_const(op->args[2])) {
55
+ uint64_t tv = arg_info(op->args[2])->val;
56
+ if (tv == -1) {
57
+ op->opc = INDEX_op_or_vec;
58
+ op->args[2] = op->args[3];
59
+ return fold_or(ctx, op);
60
+ }
61
+ if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
62
+ op->opc = INDEX_op_andc_vec;
63
+ op->args[2] = op->args[1];
64
+ op->args[1] = op->args[3];
65
+ return fold_andc(ctx, op);
66
+ }
67
+ }
68
+ if (arg_is_const(op->args[3])) {
69
+ uint64_t fv = arg_info(op->args[3])->val;
70
+ if (fv == 0) {
71
+ op->opc = INDEX_op_and_vec;
72
+ return fold_and(ctx, op);
73
+ }
74
+ if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
75
+ op->opc = INDEX_op_orc_vec;
76
+ op->args[2] = op->args[1];
77
+ op->args[1] = op->args[3];
78
+ return fold_orc(ctx, op);
79
+ }
80
+ }
81
+ return finish_folding(ctx, op);
96
+}
82
+}
97
+
83
+
98
+#endif /* CONFIG_TCG */
84
static bool fold_brcond(OptContext *ctx, TCGOp *op)
99
+
100
/* Called from RCU critical section */
101
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
102
{
85
{
103
@@ -XXX,XX +XXX,XX @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
86
int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
104
return block->offset + offset;
87
@@ -XXX,XX +XXX,XX @@ static bool fold_xor(OptContext *ctx, TCGOp *op)
88
return fold_masks_zs(ctx, op, z_mask, s_mask);
105
}
89
}
106
90
107
-/* Generate a debug exception if a watchpoint has been hit. */
91
-static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
108
-void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
109
- MemTxAttrs attrs, int flags, uintptr_t ra)
110
-{
92
-{
111
- CPUClass *cc = CPU_GET_CLASS(cpu);
93
- /* If true and false values are the same, eliminate the cmp. */
112
- CPUWatchpoint *wp;
94
- if (args_are_copies(op->args[2], op->args[3])) {
113
-
95
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
114
- assert(tcg_enabled());
115
- if (cpu->watchpoint_hit) {
116
- /*
117
- * We re-entered the check after replacing the TB.
118
- * Now raise the debug interrupt so that it will
119
- * trigger after the current instruction.
120
- */
121
- qemu_mutex_lock_iothread();
122
- cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
123
- qemu_mutex_unlock_iothread();
124
- return;
125
- }
96
- }
126
-
97
-
127
- addr = cc->adjust_watchpoint_address(cpu, addr, len);
98
- if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
128
- QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
99
- uint64_t tv = arg_info(op->args[2])->val;
129
- if (watchpoint_address_matches(wp, addr, len)
100
- uint64_t fv = arg_info(op->args[3])->val;
130
- && (wp->flags & flags)) {
101
-
131
- if (replay_running_debug()) {
102
- if (tv == -1 && fv == 0) {
132
- /*
103
- return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
133
- * Don't process the watchpoints when we are
104
- }
134
- * in a reverse debugging operation.
105
- if (tv == 0 && fv == -1) {
135
- */
106
- if (TCG_TARGET_HAS_not_vec) {
136
- replay_breakpoint();
107
- op->opc = INDEX_op_not_vec;
137
- return;
108
- return fold_not(ctx, op);
109
- } else {
110
- op->opc = INDEX_op_xor_vec;
111
- op->args[2] = arg_new_constant(ctx, -1);
112
- return fold_xor(ctx, op);
138
- }
113
- }
139
- if (flags == BP_MEM_READ) {
140
- wp->flags |= BP_WATCHPOINT_HIT_READ;
141
- } else {
142
- wp->flags |= BP_WATCHPOINT_HIT_WRITE;
143
- }
144
- wp->hitaddr = MAX(addr, wp->vaddr);
145
- wp->hitattrs = attrs;
146
- if (!cpu->watchpoint_hit) {
147
- if (wp->flags & BP_CPU &&
148
- !cc->debug_check_watchpoint(cpu, wp)) {
149
- wp->flags &= ~BP_WATCHPOINT_HIT;
150
- continue;
151
- }
152
- cpu->watchpoint_hit = wp;
153
-
154
- mmap_lock();
155
- tb_check_watchpoint(cpu, ra);
156
- if (wp->flags & BP_STOP_BEFORE_ACCESS) {
157
- cpu->exception_index = EXCP_DEBUG;
158
- mmap_unlock();
159
- cpu_loop_exit_restore(cpu, ra);
160
- } else {
161
- /* Force execution of one insn next time. */
162
- cpu->cflags_next_tb = 1 | curr_cflags();
163
- mmap_unlock();
164
- if (ra) {
165
- cpu_restore_state(cpu, ra, true);
166
- }
167
- cpu_loop_exit_noexc(cpu);
168
- }
169
- }
170
- } else {
171
- wp->flags &= ~BP_WATCHPOINT_HIT;
172
- }
114
- }
173
- }
115
- }
116
- if (arg_is_const(op->args[2])) {
117
- uint64_t tv = arg_info(op->args[2])->val;
118
- if (tv == -1) {
119
- op->opc = INDEX_op_or_vec;
120
- op->args[2] = op->args[3];
121
- return fold_or(ctx, op);
122
- }
123
- if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
124
- op->opc = INDEX_op_andc_vec;
125
- op->args[2] = op->args[1];
126
- op->args[1] = op->args[3];
127
- return fold_andc(ctx, op);
128
- }
129
- }
130
- if (arg_is_const(op->args[3])) {
131
- uint64_t fv = arg_info(op->args[3])->val;
132
- if (fv == 0) {
133
- op->opc = INDEX_op_and_vec;
134
- return fold_and(ctx, op);
135
- }
136
- if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
137
- op->opc = INDEX_op_orc_vec;
138
- op->args[2] = op->args[1];
139
- op->args[1] = op->args[3];
140
- return fold_orc(ctx, op);
141
- }
142
- }
143
- return finish_folding(ctx, op);
174
-}
144
-}
175
-
145
-
176
static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
146
/* Propagate constants and copies, fold constant expressions. */
177
MemTxAttrs attrs, void *buf, hwaddr len);
147
void tcg_optimize(TCGContext *s)
178
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
148
{
179
--
149
--
180
2.25.1
150
2.43.0
181
182
diff view generated by jsdifflib
1
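cpu_check_watchpoint() stays a public symbol but is now compiled, and valid to call, only under TCG. A hedged sketch of a hypothetical caller on a store path, to show the calling convention visible in the hunk above; the real call sites live in the TCG memory code and are not part of this patch:

    /* Hypothetical caller sketch; foo_store_notify is not QEMU code. */
    static void foo_store_notify(CPUState *cpu, vaddr addr, int size,
                                 MemTxAttrs attrs, uintptr_t retaddr)
    {
        if (!QTAILQ_EMPTY(&cpu->watchpoints)) {
            cpu_check_watchpoint(cpu, addr, size, attrs,
                                 BP_MEM_WRITE, retaddr);
        }
    }

Note the contract visible in the function body: it may not return at all; on a hit it either raises EXCP_DEBUG before the access or forces a single-instruction retranslation after it.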
From: Claudio Fontana <cfontana@suse.de>

Signed-off-by: Claudio Fontana <cfontana@suse.de>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

[claudio: rebased on Richard's splitwx work]

Signed-off-by: Claudio Fontana <cfontana@suse.de>
Message-Id: <20210204163931.7358-17-cfontana@suse.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/boards.h | 2 +-
 include/{sysemu => qemu}/accel.h | 14 +++++----
 include/sysemu/hvf.h | 2 +-
 include/sysemu/kvm.h | 2 +-
 include/sysemu/kvm_int.h | 2 +-
 target/i386/hvf/hvf-i386.h | 2 +-
 accel/accel-common.c | 50 ++++++++++++++++++++++++++++++
 accel/{accel.c => accel-softmmu.c} | 27 ++--------------
 accel/accel-user.c | 24 ++++++++++++++
 accel/qtest/qtest.c | 2 +-
 accel/tcg/tcg-all.c | 15 +++++++--
 accel/xen/xen-all.c | 2 +-
 bsd-user/main.c | 6 +++-
 linux-user/main.c | 6 +++-
 softmmu/memory.c | 2 +-
 softmmu/qtest.c | 2 +-
 softmmu/vl.c | 2 +-
 target/i386/hax/hax-all.c | 2 +-
 target/i386/hvf/hvf.c | 2 +-
 target/i386/hvf/x86_task.c | 2 +-
 target/i386/whpx/whpx-all.c | 2 +-
 MAINTAINERS | 2 +-
 accel/meson.build | 4 ++-
 accel/tcg/meson.build | 2 +-
 24 files changed, 125 insertions(+), 53 deletions(-)
 rename include/{sysemu => qemu}/accel.h (95%)
 create mode 100644 accel/accel-common.c
 rename accel/{accel.c => accel-softmmu.c} (75%)
 create mode 100644 accel/accel-user.c

diff --git a/include/hw/boards.h b/include/hw/boards.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -XXX,XX +XXX,XX @@
 #include "exec/memory.h"
 #include "sysemu/hostmem.h"
 #include "sysemu/blockdev.h"
-#include "sysemu/accel.h"
+#include "qemu/accel.h"
 #include "qapi/qapi-types-machine.h"
 #include "qemu/module.h"
 #include "qom/object.h"
diff --git a/include/sysemu/accel.h b/include/qemu/accel.h
similarity index 95%
rename from include/sysemu/accel.h
rename to include/qemu/accel.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/accel.h
+++ b/include/qemu/accel.h
@@ -XXX,XX +XXX,XX @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
-#ifndef HW_ACCEL_H
-#define HW_ACCEL_H
+#ifndef QEMU_ACCEL_H
+#define QEMU_ACCEL_H

 #include "qom/object.h"
 #include "exec/hwaddr.h"
@@ -XXX,XX +XXX,XX @@ typedef struct AccelClass {
     /*< public >*/

     const char *name;
-#ifndef CONFIG_USER_ONLY
     int (*init_machine)(MachineState *ms);
+#ifndef CONFIG_USER_ONLY
     void (*setup_post)(MachineState *ms, AccelState *accel);
     bool (*has_memory)(MachineState *ms, AddressSpace *as,
                        hwaddr start_addr, hwaddr size);
@@ -XXX,XX +XXX,XX @@ typedef struct AccelClass {
     OBJECT_GET_CLASS(AccelClass, (obj), TYPE_ACCEL)

 AccelClass *accel_find(const char *opt_name);
+AccelState *current_accel(void);
+
+#ifndef CONFIG_USER_ONLY
 int accel_init_machine(AccelState *accel, MachineState *ms);

 /* Called just before os_setup_post (ie just before drop OS privs) */
 void accel_setup_post(MachineState *ms);
+#endif /* !CONFIG_USER_ONLY */

-AccelState *current_accel(void);
-
-#endif
+#endif /* QEMU_ACCEL_H */
diff --git a/include/sysemu/hvf.h b/include/sysemu/hvf.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/hvf.h
+++ b/include/sysemu/hvf.h
@@ -XXX,XX +XXX,XX @@
 #ifndef HVF_H
 #define HVF_H

-#include "sysemu/accel.h"
+#include "qemu/accel.h"
 #include "qom/object.h"

 #ifdef CONFIG_HVF
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -XXX,XX +XXX,XX @@
 #include "qemu/queue.h"
 #include "hw/core/cpu.h"
 #include "exec/memattrs.h"
-#include "sysemu/accel.h"
+#include "qemu/accel.h"
 #include "qom/object.h"

 #ifdef NEED_CPU_H
diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
index XXXXXXX..XXXXXXX 100644
--- a/include/sysemu/kvm_int.h
+++ b/include/sysemu/kvm_int.h
@@ -XXX,XX +XXX,XX @@
 #define QEMU_KVM_INT_H

 #include "exec/memory.h"
-#include "sysemu/accel.h"
+#include "qemu/accel.h"
 #include "sysemu/kvm.h"

 typedef struct KVMSlot
diff --git a/target/i386/hvf/hvf-i386.h b/target/i386/hvf/hvf-i386.h
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/hvf/hvf-i386.h
+++ b/target/i386/hvf/hvf-i386.h
@@ -XXX,XX +XXX,XX @@
 #ifndef HVF_I386_H
 #define HVF_I386_H

-#include "sysemu/accel.h"
+#include "qemu/accel.h"
 #include "sysemu/hvf.h"
 #include "cpu.h"
 #include "x86.h"
diff --git a/accel/accel-common.c b/accel/accel-common.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/accel/accel-common.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * QEMU accel class, components common to system emulation and user mode
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/accel.h"
+
+static const TypeInfo accel_type = {
+    .name = TYPE_ACCEL,
+    .parent = TYPE_OBJECT,
+    .class_size = sizeof(AccelClass),
+    .instance_size = sizeof(AccelState),
+};
+
+/* Lookup AccelClass from opt_name. Returns NULL if not found */
+AccelClass *accel_find(const char *opt_name)
+{
+    char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name);
+    AccelClass *ac = ACCEL_CLASS(object_class_by_name(class_name));
+    g_free(class_name);
+    return ac;
+}
+
+static void register_accel_types(void)
+{
+    type_register_static(&accel_type);
+}
+
+type_init(register_accel_types);
diff --git a/accel/accel.c b/accel/accel-softmmu.c
similarity index 75%
rename from accel/accel.c
rename to accel/accel-softmmu.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/accel.c
+++ b/accel/accel-softmmu.c
@@ -XXX,XX +XXX,XX @@
 /*
- * QEMU System Emulator, accelerator interfaces
+ * QEMU accel class, system emulation components
  *
  * Copyright (c) 2003-2008 Fabrice Bellard
  * Copyright (c) 2014 Red Hat Inc.
@@ -XXX,XX +XXX,XX @@
  */

 #include "qemu/osdep.h"
-#include "sysemu/accel.h"
+#include "qemu/accel.h"
 #include "hw/boards.h"
 #include "sysemu/arch_init.h"
 #include "sysemu/sysemu.h"
 #include "qom/object.h"

-static const TypeInfo accel_type = {
    [the removed copies of accel_type and accel_find, identical to the
     code added to accel-common.c above, are elided]
-
 int accel_init_machine(AccelState *accel, MachineState *ms)
 {
     AccelClass *acc = ACCEL_GET_CLASS(accel);
@@ -XXX,XX +XXX,XX @@ void accel_setup_post(MachineState *ms)
         acc->setup_post(ms, accel);
     }
 }
-
-static void register_accel_types(void)
-{
-    type_register_static(&accel_type);
-}
-
-type_init(register_accel_types);
diff --git a/accel/accel-user.c b/accel/accel-user.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/accel/accel-user.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * QEMU accel class, user-mode components
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/accel.h"
+
+AccelState *current_accel(void)
+{
+    static AccelState *accel;
+
+    if (!accel) {
+        AccelClass *ac = accel_find("tcg");
+
+        g_assert(ac != NULL);
+        accel = ACCEL(object_new_with_class(OBJECT_CLASS(ac)));
+    }
+    return accel;
+}
diff --git a/accel/qtest/qtest.c b/accel/qtest/qtest.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/qtest/qtest.c
+++ b/accel/qtest/qtest.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/module.h"
 #include "qemu/option.h"
 #include "qemu/config-file.h"
-#include "sysemu/accel.h"
+#include "qemu/accel.h"
 #include "sysemu/qtest.h"
 #include "sysemu/cpus.h"
 #include "sysemu/cpu-timers.h"
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-all.c
+++ b/accel/tcg/tcg-all.c
@@ -XXX,XX +XXX,XX @@
 #include "tcg/tcg.h"
 #include "qapi/error.h"
 #include "qemu/error-report.h"
-#include "hw/boards.h"
+#include "qemu/accel.h"
 #include "qapi/qapi-builtin-visit.h"
+
+#ifndef CONFIG_USER_ONLY
 #include "tcg-cpus.h"
+#endif /* CONFIG_USER_ONLY */

 struct TCGState {
     AccelState parent_obj;
@@ -XXX,XX +XXX,XX @@ static void tcg_accel_instance_init(Object *obj)
     s->mttcg_enabled = default_mttcg_enabled();

     /* If debugging enabled, default "auto on", otherwise off. */
-#ifdef CONFIG_DEBUG_TCG
+#if defined(CONFIG_DEBUG_TCG) && !defined(CONFIG_USER_ONLY)
     s->splitwx_enabled = -1;
 #else
     s->splitwx_enabled = 0;
@@ -XXX,XX +XXX,XX @@ static int tcg_init(MachineState *ms)
     mttcg_enabled = s->mttcg_enabled;

     /*
-     * Initialize TCG regions
+     * Initialize TCG regions only for softmmu.
+     *
+     * This needs to be done later for user mode, because the prologue
+     * generation needs to be delayed so that GUEST_BASE is already set.
      */
+#ifndef CONFIG_USER_ONLY
     tcg_region_init();

     if (mttcg_enabled) {
@@ -XXX,XX +XXX,XX @@ static int tcg_init(MachineState *ms)
     } else {
         cpus_register_accel(&tcg_cpus_rr);
     }
+#endif /* !CONFIG_USER_ONLY */
+
     return 0;
 }

diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/xen/xen-all.c
+++ b/accel/xen/xen-all.c
@@ -XXX,XX +XXX,XX @@
 #include "hw/xen/xen-legacy-backend.h"
 #include "hw/xen/xen_pt.h"
 #include "chardev/char.h"
-#include "sysemu/accel.h"
+#include "qemu/accel.h"
 #include "sysemu/cpus.h"
 #include "sysemu/xen.h"
 #include "sysemu/runstate.h"
diff --git a/bsd-user/main.c b/bsd-user/main.c
index XXXXXXX..XXXXXXX 100644
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/osdep.h"
 #include "qemu-common.h"
 #include "qemu/units.h"
+#include "qemu/accel.h"
 #include "sysemu/tcg.h"
 #include "qemu-version.h"
 #include <machine/trap.h>
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
     }

     /* init tcg before creating CPUs and to get qemu_host_page_size */
-    tcg_exec_init(0, false);
+    {
+        AccelClass *ac = ACCEL_GET_CLASS(current_accel());

+        ac->init_machine(NULL);
+    }
     cpu_type = parse_cpu_option(cpu_model);
     cpu = cpu_create(cpu_type);
     env = cpu->env_ptr;
diff --git a/linux-user/main.c b/linux-user/main.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/osdep.h"
 #include "qemu-common.h"
 #include "qemu/units.h"
+#include "qemu/accel.h"
 #include "sysemu/tcg.h"
 #include "qemu-version.h"
 #include <sys/syscall.h>
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv, char **envp)
     cpu_type = parse_cpu_option(cpu_model);

     /* init tcg before creating CPUs and to get qemu_host_page_size */
-    tcg_exec_init(0, false);
+    {
+        AccelClass *ac = ACCEL_GET_CLASS(current_accel());

+        ac->init_machine(NULL);
+    }
     cpu = cpu_create(cpu_type);
     env = cpu->env_ptr;
     cpu_reset(cpu);
diff --git a/softmmu/memory.c b/softmmu/memory.c
index XXXXXXX..XXXXXXX 100644
--- a/softmmu/memory.c
+++ b/softmmu/memory.c
@@ -XXX,XX +XXX,XX @@
 #include "sysemu/kvm.h"
 #include "sysemu/runstate.h"
 #include "sysemu/tcg.h"
-#include "sysemu/accel.h"
+#include "qemu/accel.h"
 #include "hw/boards.h"
 #include "migration/vmstate.h"

diff --git a/softmmu/qtest.c b/softmmu/qtest.c
index XXXXXXX..XXXXXXX 100644
--- a/softmmu/qtest.c
+++ b/softmmu/qtest.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/ioport.h"
 #include "exec/memory.h"
 #include "hw/irq.h"
-#include "sysemu/accel.h"
+#include "qemu/accel.h"
 #include "sysemu/cpu-timers.h"
 #include "qemu/config-file.h"
 #include "qemu/option.h"
diff --git a/softmmu/vl.c b/softmmu/vl.c
index XXXXXXX..XXXXXXX 100644
--- a/softmmu/vl.c
+++ b/softmmu/vl.c
@@ -XXX,XX +XXX,XX @@

 #include "qemu/error-report.h"
 #include "qemu/sockets.h"
-#include "sysemu/accel.h"
+#include "qemu/accel.h"
 #include "hw/usb.h"
 #include "hw/isa/isa.h"
 #include "hw/scsi/scsi.h"
diff --git a/target/i386/hax/hax-all.c b/target/i386/hax/hax-all.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/hax/hax-all.c
+++ b/target/i386/hax/hax-all.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/address-spaces.h"

 #include "qemu-common.h"
-#include "sysemu/accel.h"
+#include "qemu/accel.h"
 #include "sysemu/reset.h"
 #include "sysemu/runstate.h"
 #include "hw/boards.h"
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/address-spaces.h"
 #include "hw/i386/apic_internal.h"
 #include "qemu/main-loop.h"
-#include "sysemu/accel.h"
+#include "qemu/accel.h"
 #include "target/i386/cpu.h"

 #include "hvf-cpus.h"
diff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/hvf/x86_task.c
+++ b/target/i386/hvf/x86_task.c
@@ -XXX,XX +XXX,XX @@

 #include "hw/i386/apic_internal.h"
 #include "qemu/main-loop.h"
-#include "sysemu/accel.h"
+#include "qemu/accel.h"
 #include "target/i386/cpu.h"

 // TODO: taskswitch handling
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -XXX,XX +XXX,XX @@
 #include "exec/address-spaces.h"
 #include "exec/ioport.h"
 #include "qemu-common.h"
-#include "sysemu/accel.h"
+#include "qemu/accel.h"
 #include "sysemu/whpx.h"
 #include "sysemu/cpus.h"
 #include "sysemu/runstate.h"
diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ Overall
 M: Richard Henderson <richard.henderson@linaro.org>
 R: Paolo Bonzini <pbonzini@redhat.com>
 S: Maintained
-F: include/sysemu/accel.h
+F: include/qemu/accel.h
 F: accel/accel.c
 F: accel/Makefile.objs
 F: accel/stubs/Makefile.objs
diff --git a/accel/meson.build b/accel/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/accel/meson.build
+++ b/accel/meson.build
@@ -XXX,XX +XXX,XX @@
-softmmu_ss.add(files('accel.c'))
+specific_ss.add(files('accel-common.c'))
+softmmu_ss.add(files('accel-softmmu.c'))
+user_ss.add(files('accel-user.c'))

 subdir('qtest')
 subdir('kvm')
diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/meson.build
+++ b/accel/tcg/meson.build
@@ -XXX,XX +XXX,XX @@
 tcg_ss = ss.source_set()
 tcg_ss.add(files(
+  'tcg-all.c',
   'cpu-exec-common.c',
   'cpu-exec.c',
   'tcg-runtime-gvec.c',
@@ -XXX,XX +XXX,XX @@ tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c'), libdl])
 specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)

 specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
-  'tcg-all.c',
   'cputlb.c',
   'tcg-cpus.c',
   'tcg-cpus-mttcg.c',
--
2.25.1

The big comment just above says functions should be sorted.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 60 +++++++++++++++++++++++-------------------------
 1 file changed, 30 insertions(+), 30 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -XXX,XX +XXX,XX @@ static bool fold_call(OptContext *ctx, TCGOp *op)
     return true;
 }

+static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
+{
+    /* Canonicalize the comparison to put immediate second. */
+    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
+        op->args[3] = tcg_swap_cond(op->args[3]);
+    }
+    return finish_folding(ctx, op);
+}
+
+static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
+{
+    /* If true and false values are the same, eliminate the cmp. */
+    if (args_are_copies(op->args[3], op->args[4])) {
+        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
+    }
+
+    /* Canonicalize the comparison to put immediate second. */
+    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
+        op->args[5] = tcg_swap_cond(op->args[5]);
+    }
+    /*
+     * Canonicalize the "false" input reg to match the destination,
+     * so that the tcg backend can implement "move if true".
+     */
+    if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
+        op->args[5] = tcg_invert_cond(op->args[5]);
+    }
+    return finish_folding(ctx, op);
+}
+
 static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask, s_mask;
@@ -XXX,XX +XXX,XX @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
     return tcg_opt_gen_movi(ctx, op, op->args[0], i);
 }

-static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
-{
    [the removed copies of fold_cmp_vec and fold_cmpsel_vec, identical to
     the blocks added above but prefixed with '-', are elided]
-}
-
 static bool fold_sextract(OptContext *ctx, TCGOp *op)
 {
     uint64_t z_mask, s_mask, s_mask_old;
--
2.43.0
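A short note on accel_find(), now shared by both build modes: it turns the short accelerator option name into a QOM class name and looks that class up. Assuming the usual ACCEL_CLASS_NAME() convention of appending the TYPE_ACCEL suffix (the macro definition is not part of this hunk), "tcg" resolves to a class named "tcg-accel". A hedged usage sketch modeled on the accel-user.c hunk above, with error handling of our own invention:

    AccelClass *ac = accel_find("tcg");   /* looks up ACCEL_CLASS_NAME("tcg") */
    if (ac == NULL) {
        error_report("accelerator 'tcg' not available");   /* our handling */
        exit(1);
    }
    AccelState *accel = ACCEL(object_new_with_class(OBJECT_CLASS(ac)));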
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tci.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             t2 = tci_read_s32(&tb_ptr);
             tci_write_reg(regs, t0, *(uint32_t *)(t1 + t2));
             break;
-        case INDEX_op_st8_i32:
+        CASE_32_64(st8)
             t0 = tci_read_r8(regs, &tb_ptr);
             t1 = tci_read_r(regs, &tb_ptr);
             t2 = tci_read_s32(&tb_ptr);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             t2 = tci_read_s32(&tb_ptr);
             tci_write_reg(regs, t0, *(uint64_t *)(t1 + t2));
             break;
-        case INDEX_op_st8_i64:
-            t0 = tci_read_r8(regs, &tb_ptr);
-            t1 = tci_read_r(regs, &tb_ptr);
-            t2 = tci_read_s32(&tb_ptr);
-            *(uint8_t *)(t1 + t2) = t0;
-            break;
         case INDEX_op_st16_i64:
             t0 = tci_read_r16(regs, &tb_ptr);
             t1 = tci_read_r(regs, &tb_ptr);
--
2.25.1

We currently have a flag, float_muladd_halve_result, to scale
the result by 2**-1. Extend this to handle arbitrary scaling.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/fpu/softfloat.h | 6 ++++
 fpu/softfloat.c | 58 ++++++++++++++++++++++-----------------
 fpu/softfloat-parts.c.inc | 7 +++--
 3 files changed, 44 insertions(+), 27 deletions(-)

diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -XXX,XX +XXX,XX @@ float16 float16_add(float16, float16, float_status *status);
 float16 float16_sub(float16, float16, float_status *status);
 float16 float16_mul(float16, float16, float_status *status);
 float16 float16_muladd(float16, float16, float16, int, float_status *status);
+float16 float16_muladd_scalbn(float16, float16, float16,
+                              int, int, float_status *status);
 float16 float16_div(float16, float16, float_status *status);
 float16 float16_scalbn(float16, int, float_status *status);
 float16 float16_min(float16, float16, float_status *status);
@@ -XXX,XX +XXX,XX @@ float32 float32_mul(float32, float32, float_status *status);
 float32 float32_div(float32, float32, float_status *status);
 float32 float32_rem(float32, float32, float_status *status);
 float32 float32_muladd(float32, float32, float32, int, float_status *status);
+float32 float32_muladd_scalbn(float32, float32, float32,
+                              int, int, float_status *status);
 float32 float32_sqrt(float32, float_status *status);
 float32 float32_exp2(float32, float_status *status);
 float32 float32_log2(float32, float_status *status);
@@ -XXX,XX +XXX,XX @@ float64 float64_mul(float64, float64, float_status *status);
 float64 float64_div(float64, float64, float_status *status);
 float64 float64_rem(float64, float64, float_status *status);
 float64 float64_muladd(float64, float64, float64, int, float_status *status);
+float64 float64_muladd_scalbn(float64, float64, float64,
+                              int, int, float_status *status);
 float64 float64_sqrt(float64, float_status *status);
 float64 float64_log2(float64, float_status *status);
 FloatRelation float64_compare(float64, float64, float_status *status);
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -XXX,XX +XXX,XX @@ static FloatParts128 *parts128_mul(FloatParts128 *a, FloatParts128 *b,
 #define parts_mul(A, B, S) \
     PARTS_GENERIC_64_128(mul, A)(A, B, S)

-static FloatParts64 *parts64_muladd(FloatParts64 *a, FloatParts64 *b,
-                                    FloatParts64 *c, int flags,
-                                    float_status *s);
-static FloatParts128 *parts128_muladd(FloatParts128 *a, FloatParts128 *b,
-                                      FloatParts128 *c, int flags,
-                                      float_status *s);
+static FloatParts64 *parts64_muladd_scalbn(FloatParts64 *a, FloatParts64 *b,
+                                           FloatParts64 *c, int scale,
+                                           int flags, float_status *s);
+static FloatParts128 *parts128_muladd_scalbn(FloatParts128 *a, FloatParts128 *b,
+                                             FloatParts128 *c, int scale,
+                                             int flags, float_status *s);

-#define parts_muladd(A, B, C, Z, S) \
-    PARTS_GENERIC_64_128(muladd, A)(A, B, C, Z, S)
+#define parts_muladd_scalbn(A, B, C, Z, Y, S) \
+    PARTS_GENERIC_64_128(muladd_scalbn, A)(A, B, C, Z, Y, S)

 static FloatParts64 *parts64_div(FloatParts64 *a, FloatParts64 *b,
                                  float_status *s);
@@ -XXX,XX +XXX,XX @@ floatx80_mul(floatx80 a, floatx80 b, float_status *status)
  * Fused multiply-add
  */

-float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
-                                    int flags, float_status *status)
+float16 QEMU_FLATTEN
+float16_muladd_scalbn(float16 a, float16 b, float16 c,
+                      int scale, int flags, float_status *status)
 {
     FloatParts64 pa, pb, pc, *pr;

     float16_unpack_canonical(&pa, a, status);
     float16_unpack_canonical(&pb, b, status);
     float16_unpack_canonical(&pc, c, status);
-    pr = parts_muladd(&pa, &pb, &pc, flags, status);
+    pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);

     return float16_round_pack_canonical(pr, status);
 }

-static float32 QEMU_SOFTFLOAT_ATTR
-soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
-                float_status *status)
+float16 float16_muladd(float16 a, float16 b, float16 c,
+                       int flags, float_status *status)
+{
+    return float16_muladd_scalbn(a, b, c, 0, flags, status);
+}
+
+float32 QEMU_SOFTFLOAT_ATTR
+float32_muladd_scalbn(float32 a, float32 b, float32 c,
+                      int scale, int flags, float_status *status)
 {
     FloatParts64 pa, pb, pc, *pr;

     float32_unpack_canonical(&pa, a, status);
     float32_unpack_canonical(&pb, b, status);
     float32_unpack_canonical(&pc, c, status);
-    pr = parts_muladd(&pa, &pb, &pc, flags, status);
+    pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);

     return float32_round_pack_canonical(pr, status);
 }

-static float64 QEMU_SOFTFLOAT_ATTR
-soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
-                float_status *status)
+float64 QEMU_SOFTFLOAT_ATTR
+float64_muladd_scalbn(float64 a, float64 b, float64 c,
+                      int scale, int flags, float_status *status)
 {
     FloatParts64 pa, pb, pc, *pr;

     float64_unpack_canonical(&pa, a, status);
     float64_unpack_canonical(&pb, b, status);
     float64_unpack_canonical(&pc, c, status);
-    pr = parts_muladd(&pa, &pb, &pc, flags, status);
+    pr = parts_muladd_scalbn(&pa, &pb, &pc, scale, flags, status);

     return float64_round_pack_canonical(pr, status);
 }
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
     return ur.s;

 soft:
-    return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
+    return float32_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
 }

 float64 QEMU_FLATTEN
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
     return ur.s;

 soft:
-    return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
+    return float64_muladd_scalbn(ua.s, ub.s, uc.s, 0, flags, s);
 }

 float64 float64r32_muladd(float64 a, float64 b, float64 c,
@@ -XXX,XX +XXX,XX @@ float64 float64r32_muladd(float64 a, float64 b, float64 c,
     float64_unpack_canonical(&pa, a, status);
     float64_unpack_canonical(&pb, b, status);
     float64_unpack_canonical(&pc, c, status);
-    pr = parts_muladd(&pa, &pb, &pc, flags, status);
+    pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);

     return float64r32_round_pack_canonical(pr, status);
 }
@@ -XXX,XX +XXX,XX @@ bfloat16 QEMU_FLATTEN bfloat16_muladd(bfloat16 a, bfloat16 b, bfloat16 c,
     bfloat16_unpack_canonical(&pa, a, status);
     bfloat16_unpack_canonical(&pb, b, status);
     bfloat16_unpack_canonical(&pc, c, status);
-    pr = parts_muladd(&pa, &pb, &pc, flags, status);
+    pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);

     return bfloat16_round_pack_canonical(pr, status);
 }
@@ -XXX,XX +XXX,XX @@ float128 QEMU_FLATTEN float128_muladd(float128 a, float128 b, float128 c,
     float128_unpack_canonical(&pa, a, status);
     float128_unpack_canonical(&pb, b, status);
     float128_unpack_canonical(&pc, c, status);
-    pr = parts_muladd(&pa, &pb, &pc, flags, status);
+    pr = parts_muladd_scalbn(&pa, &pb, &pc, 0, flags, status);

     return float128_round_pack_canonical(pr, status);
 }
@@ -XXX,XX +XXX,XX @@ float32 float32_exp2(float32 a, float_status *status)

     float64_unpack_canonical(&rp, float64_one, status);
     for (i = 0 ; i < 15 ; i++) {
+
         float64_unpack_canonical(&tp, float32_exp2_coefficients[i], status);
-        rp = *parts_muladd(&tp, &xnp, &rp, 0, status);
+        rp = *parts_muladd_scalbn(&tp, &xnp, &rp, 0, 0, status);
         xnp = *parts_mul(&xnp, &xp, status);
     }

diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(mul)(FloatPartsN *a, FloatPartsN *b,
  * Requires A and C extracted into a double-sized structure to provide the
  * extra space for the widening multiply.
  */
-static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
-                                   FloatPartsN *c, int flags, float_status *s)
+static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
+                                          FloatPartsN *c, int scale,
+                                          int flags, float_status *s)
 {
     int ab_mask, abc_mask;
     FloatPartsW p_widen, c_widen;
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd)(FloatPartsN *a, FloatPartsN *b,
     a->exp = p_widen.exp;

 return_normal:
+    /* TODO: Replace all use of float_muladd_halve_result with scale. */
     if (flags & float_muladd_halve_result) {
         a->exp -= 1;
     }
+    a->exp += scale;
 finish_sign:
     if (flags & float_muladd_negate_result) {
         a->sign ^= 1;
--
2.43.0
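What the new interface computes, in one line: float*_muladd_scalbn(a, b, c, scale, flags, s) returns (a * b + c) * 2**scale with a single rounding at the end, as the placement of a->exp += scale before packing in softfloat-parts.c.inc shows. The old halve-result flag is exactly the scale == -1 case:

    /* Illustration: the two calls below produce bit-identical results. */
    float64 r_old = float64_muladd(a, b, c, float_muladd_halve_result, s);
    float64 r_new = float64_muladd_scalbn(a, b, c, -1, 0, s);

The flag is kept for now; per the TODO in the hunk above, it goes away once all users have been converted to pass a scale.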
From: Eduardo Habkost <ehabkost@redhat.com>
1
Use the scalbn interface instead of float_muladd_halve_result.
2
2
3
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
[claudio: wrapped target code in CONFIG_TCG, reworded comments]
5
Signed-off-by: Claudio Fontana <cfontana@suse.de>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Message-Id: <20210204163931.7358-5-cfontana@suse.de>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
5
---
10
include/hw/core/cpu.h | 22 +++++++++++++---------
6
target/arm/tcg/helper-a64.c | 6 +++---
11
accel/tcg/cpu-exec.c | 4 ++--
7
1 file changed, 3 insertions(+), 3 deletions(-)
12
target/arm/cpu.c | 4 +++-
13
target/avr/cpu.c | 2 +-
14
target/hppa/cpu.c | 2 +-
15
target/i386/tcg/tcg-cpu.c | 2 +-
16
target/microblaze/cpu.c | 2 +-
17
target/mips/cpu.c | 4 +++-
18
target/riscv/cpu.c | 2 +-
19
target/rx/cpu.c | 2 +-
20
target/sh4/cpu.c | 2 +-
21
target/sparc/cpu.c | 2 +-
22
target/tricore/cpu.c | 2 +-
23
13 files changed, 30 insertions(+), 22 deletions(-)
24
8
25
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
9
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
26
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
27
--- a/include/hw/core/cpu.h
11
--- a/target/arm/tcg/helper-a64.c
28
+++ b/include/hw/core/cpu.h
12
+++ b/target/arm/tcg/helper-a64.c
29
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
13
@@ -XXX,XX +XXX,XX @@ uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, float_status *fpst)
30
* Called when the first CPU is realized.
14
(float16_is_infinity(b) && float16_is_zero(a))) {
31
*/
15
return float16_one_point_five;
32
void (*initialize)(void);
33
+ /**
34
+ * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
35
+ *
36
+ * This is called when we abandon execution of a TB before starting it,
37
+ * and must set all parts of the CPU state which the previous TB in the
38
+ * chain may not have updated.
39
+ * By default, when this is NULL, a call is made to @set_pc(tb->pc).
40
+ *
41
+ * If more state needs to be restored, the target must implement a
42
+ * function to restore all the state, and register it here.
43
+ */
44
+ void (*synchronize_from_tb)(CPUState *cpu,
45
+ const struct TranslationBlock *tb);
46
47
} TcgCpuOperations;
48
49
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
50
* If the target behaviour here is anything other than "set
51
* the PC register to the value passed in" then the target must
52
* also implement the synchronize_from_tb hook.
53
- * @synchronize_from_tb: Callback for synchronizing state from a TCG
54
- * #TranslationBlock. This is called when we abandon execution
55
- * of a TB before starting it, and must set all parts of the CPU
56
- * state which the previous TB in the chain may not have updated.
57
- * This always includes at least the program counter; some targets
58
- * will need to do more. If this hook is not implemented then the
59
- * default is to call @set_pc(tb->pc).
60
* @tlb_fill: Callback for handling a softmmu tlb miss or user-only
61
* address fault. For system mode, if the access is valid, call
62
* tlb_set_page and return true; if the access is invalid, and
63
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
64
void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
65
Error **errp);
66
void (*set_pc)(CPUState *cpu, vaddr value);
67
- void (*synchronize_from_tb)(CPUState *cpu,
68
- const struct TranslationBlock *tb);
69
bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
70
MMUAccessType access_type, int mmu_idx,
71
bool probe, uintptr_t retaddr);
72
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
73
index XXXXXXX..XXXXXXX 100644
74
--- a/accel/tcg/cpu-exec.c
75
+++ b/accel/tcg/cpu-exec.c
76
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
77
TARGET_FMT_lx "] %s\n",
78
last_tb->tc.ptr, last_tb->pc,
79
lookup_symbol(last_tb->pc));
80
- if (cc->synchronize_from_tb) {
81
- cc->synchronize_from_tb(cpu, last_tb);
82
+ if (cc->tcg_ops.synchronize_from_tb) {
83
+ cc->tcg_ops.synchronize_from_tb(cpu, last_tb);
84
} else {
85
assert(cc->set_pc);
86
cc->set_pc(cpu, last_tb->pc);
87
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
88
index XXXXXXX..XXXXXXX 100644
89
--- a/target/arm/cpu.c
90
+++ b/target/arm/cpu.c
91
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_set_pc(CPUState *cs, vaddr value)
92
}
16
}
17
- return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
18
+ return float16_muladd_scalbn(a, b, float16_three, -1, 0, fpst);
93
}
19
}
94
20
95
+#ifdef CONFIG_TCG
21
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
96
static void arm_cpu_synchronize_from_tb(CPUState *cs,
22
@@ -XXX,XX +XXX,XX @@ float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, float_status *fpst)
97
const TranslationBlock *tb)
23
(float32_is_infinity(b) && float32_is_zero(a))) {
98
{
24
return float32_one_point_five;
99
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_synchronize_from_tb(CPUState *cs,
100
env->regs[15] = tb->pc;
101
}
25
}
26
- return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
27
+ return float32_muladd_scalbn(a, b, float32_three, -1, 0, fpst);
102
}
28
}
103
+#endif /* CONFIG_TCG */
29
104
30
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
105
static bool arm_cpu_has_work(CPUState *cs)
31
@@ -XXX,XX +XXX,XX @@ float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, float_status *fpst)
106
{
32
(float64_is_infinity(b) && float64_is_zero(a))) {
107
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
33
return float64_one_point_five;
108
cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
109
cc->dump_state = arm_cpu_dump_state;
110
cc->set_pc = arm_cpu_set_pc;
111
- cc->synchronize_from_tb = arm_cpu_synchronize_from_tb;
112
cc->gdb_read_register = arm_cpu_gdb_read_register;
113
cc->gdb_write_register = arm_cpu_gdb_write_register;
114
#ifndef CONFIG_USER_ONLY
115
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
116
cc->disas_set_info = arm_disas_set_info;
117
#ifdef CONFIG_TCG
118
cc->tcg_ops.initialize = arm_translate_init;
119
+ cc->tcg_ops.synchronize_from_tb = arm_cpu_synchronize_from_tb;
120
cc->tlb_fill = arm_cpu_tlb_fill;
121
cc->debug_excp_handler = arm_debug_excp_handler;
122
cc->debug_check_watchpoint = arm_debug_check_watchpoint;
123
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
124
index XXXXXXX..XXXXXXX 100644
125
--- a/target/avr/cpu.c
126
+++ b/target/avr/cpu.c
127
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
128
cc->vmsd = &vms_avr_cpu;
129
cc->disas_set_info = avr_cpu_disas_set_info;
130
cc->tcg_ops.initialize = avr_cpu_tcg_init;
131
- cc->synchronize_from_tb = avr_cpu_synchronize_from_tb;
132
+ cc->tcg_ops.synchronize_from_tb = avr_cpu_synchronize_from_tb;
133
cc->gdb_read_register = avr_cpu_gdb_read_register;
134
cc->gdb_write_register = avr_cpu_gdb_write_register;
135
cc->gdb_num_core_regs = 35;
136
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
137
index XXXXXXX..XXXXXXX 100644
138
--- a/target/hppa/cpu.c
139
+++ b/target/hppa/cpu.c
140
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
141
cc->cpu_exec_interrupt = hppa_cpu_exec_interrupt;
142
cc->dump_state = hppa_cpu_dump_state;
143
cc->set_pc = hppa_cpu_set_pc;
144
- cc->synchronize_from_tb = hppa_cpu_synchronize_from_tb;
145
+ cc->tcg_ops.synchronize_from_tb = hppa_cpu_synchronize_from_tb;
146
cc->gdb_read_register = hppa_cpu_gdb_read_register;
147
cc->gdb_write_register = hppa_cpu_gdb_write_register;
148
cc->tlb_fill = hppa_cpu_tlb_fill;
149
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
150
index XXXXXXX..XXXXXXX 100644
151
--- a/target/i386/tcg/tcg-cpu.c
152
+++ b/target/i386/tcg/tcg-cpu.c
153
@@ -XXX,XX +XXX,XX @@ void tcg_cpu_common_class_init(CPUClass *cc)
154
{
155
cc->do_interrupt = x86_cpu_do_interrupt;
156
cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
157
- cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
158
+ cc->tcg_ops.synchronize_from_tb = x86_cpu_synchronize_from_tb;
159
cc->cpu_exec_enter = x86_cpu_exec_enter;
160
cc->cpu_exec_exit = x86_cpu_exec_exit;
161
cc->tcg_ops.initialize = tcg_x86_init;
162
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
163
index XXXXXXX..XXXXXXX 100644
164
--- a/target/microblaze/cpu.c
165
+++ b/target/microblaze/cpu.c
166
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
167
cc->cpu_exec_interrupt = mb_cpu_exec_interrupt;
168
cc->dump_state = mb_cpu_dump_state;
169
cc->set_pc = mb_cpu_set_pc;
170
- cc->synchronize_from_tb = mb_cpu_synchronize_from_tb;
171
+ cc->tcg_ops.synchronize_from_tb = mb_cpu_synchronize_from_tb;
172
cc->gdb_read_register = mb_cpu_gdb_read_register;
173
cc->gdb_write_register = mb_cpu_gdb_write_register;
174
cc->tlb_fill = mb_cpu_tlb_fill;
175
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
176
index XXXXXXX..XXXXXXX 100644
177
--- a/target/mips/cpu.c
178
+++ b/target/mips/cpu.c
179
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_set_pc(CPUState *cs, vaddr value)
180
}
34
}
35
- return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
36
+ return float64_muladd_scalbn(a, b, float64_three, -1, 0, fpst);
181
}
37
}
182
38
183
+#ifdef CONFIG_TCG
39
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
184
static void mips_cpu_synchronize_from_tb(CPUState *cs,
185
const TranslationBlock *tb)
186
{
187
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_synchronize_from_tb(CPUState *cs,
188
env->hflags &= ~MIPS_HFLAG_BMASK;
189
env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
190
}
191
+#endif /* CONFIG_TCG */
192
193
static bool mips_cpu_has_work(CPUState *cs)
194
{
195
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
196
cc->cpu_exec_interrupt = mips_cpu_exec_interrupt;
197
cc->dump_state = mips_cpu_dump_state;
198
cc->set_pc = mips_cpu_set_pc;
199
- cc->synchronize_from_tb = mips_cpu_synchronize_from_tb;
200
cc->gdb_read_register = mips_cpu_gdb_read_register;
201
cc->gdb_write_register = mips_cpu_gdb_write_register;
202
#ifndef CONFIG_USER_ONLY
203
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
204
cc->disas_set_info = mips_cpu_disas_set_info;
205
#ifdef CONFIG_TCG
206
cc->tcg_ops.initialize = mips_tcg_init;
207
+ cc->tcg_ops.synchronize_from_tb = mips_cpu_synchronize_from_tb;
208
cc->tlb_fill = mips_cpu_tlb_fill;
209
#endif
210
211
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
212
index XXXXXXX..XXXXXXX 100644
213
--- a/target/riscv/cpu.c
214
+++ b/target/riscv/cpu.c
215
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
216
cc->cpu_exec_interrupt = riscv_cpu_exec_interrupt;
217
cc->dump_state = riscv_cpu_dump_state;
218
cc->set_pc = riscv_cpu_set_pc;
219
- cc->synchronize_from_tb = riscv_cpu_synchronize_from_tb;
220
+ cc->tcg_ops.synchronize_from_tb = riscv_cpu_synchronize_from_tb;
221
cc->gdb_read_register = riscv_cpu_gdb_read_register;
222
cc->gdb_write_register = riscv_cpu_gdb_write_register;
223
cc->gdb_num_core_regs = 33;
224
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
225
index XXXXXXX..XXXXXXX 100644
226
--- a/target/rx/cpu.c
227
+++ b/target/rx/cpu.c
228
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)
229
cc->cpu_exec_interrupt = rx_cpu_exec_interrupt;
230
cc->dump_state = rx_cpu_dump_state;
231
cc->set_pc = rx_cpu_set_pc;
232
- cc->synchronize_from_tb = rx_cpu_synchronize_from_tb;
233
+ cc->tcg_ops.synchronize_from_tb = rx_cpu_synchronize_from_tb;
234
cc->gdb_read_register = rx_cpu_gdb_read_register;
235
cc->gdb_write_register = rx_cpu_gdb_write_register;
236
cc->get_phys_page_debug = rx_cpu_get_phys_page_debug;
237
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
238
index XXXXXXX..XXXXXXX 100644
239
--- a/target/sh4/cpu.c
240
+++ b/target/sh4/cpu.c
241
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
242
cc->cpu_exec_interrupt = superh_cpu_exec_interrupt;
243
cc->dump_state = superh_cpu_dump_state;
244
cc->set_pc = superh_cpu_set_pc;
245
- cc->synchronize_from_tb = superh_cpu_synchronize_from_tb;
246
+ cc->tcg_ops.synchronize_from_tb = superh_cpu_synchronize_from_tb;
247
cc->gdb_read_register = superh_cpu_gdb_read_register;
248
cc->gdb_write_register = superh_cpu_gdb_write_register;
249
cc->tlb_fill = superh_cpu_tlb_fill;
250
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
251
index XXXXXXX..XXXXXXX 100644
252
--- a/target/sparc/cpu.c
253
+++ b/target/sparc/cpu.c
254
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
255
cc->memory_rw_debug = sparc_cpu_memory_rw_debug;
256
#endif
257
cc->set_pc = sparc_cpu_set_pc;
258
- cc->synchronize_from_tb = sparc_cpu_synchronize_from_tb;
259
+ cc->tcg_ops.synchronize_from_tb = sparc_cpu_synchronize_from_tb;
260
cc->gdb_read_register = sparc_cpu_gdb_read_register;
261
cc->gdb_write_register = sparc_cpu_gdb_write_register;
262
cc->tlb_fill = sparc_cpu_tlb_fill;
263
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
264
index XXXXXXX..XXXXXXX 100644
265
--- a/target/tricore/cpu.c
266
+++ b/target/tricore/cpu.c
267
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)
268
269
cc->dump_state = tricore_cpu_dump_state;
270
cc->set_pc = tricore_cpu_set_pc;
271
- cc->synchronize_from_tb = tricore_cpu_synchronize_from_tb;
272
+ cc->tcg_ops.synchronize_from_tb = tricore_cpu_synchronize_from_tb;
273
cc->get_phys_page_debug = tricore_cpu_get_phys_page_debug;
274
cc->tcg_ops.initialize = tricore_tcg_init;
275
cc->tlb_fill = tricore_cpu_tlb_fill;
276
--
2.25.1

--
2.43.0
1
Tested-by: Alex Bennée <alex.bennee@linaro.org>
1
Use the scalbn interface instead of float_muladd_halve_result.
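
Illustration only, not part of the patch: float*_muladd_scalbn()
computes (a * b + c) * 2**scale with a single rounding, so a scale
of -1 reproduces the old float_muladd_halve_result behaviour.
A hypothetical caller, assuming QEMU's softfloat API:

    #include "fpu/softfloat.h"

    /* (a * b + c) / 2 with a single rounding: scale -1 replaces the flag. */
    static float64 halve_fma(float64 a, float64 b, float64 c, float_status *s)
    {
        return float64_muladd_scalbn(a, b, c, -1, 0, s);
    }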
2
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
---
5
tcg/tci.c | 7 +------
6
target/sparc/helper.h | 4 +-
6
1 file changed, 1 insertion(+), 6 deletions(-)
7
target/sparc/fop_helper.c | 8 ++--
7
8
target/sparc/translate.c | 80 +++++++++++++++++++++++----------------
8
diff --git a/tcg/tci.c b/tcg/tci.c
9
3 files changed, 54 insertions(+), 38 deletions(-)
10
11
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
9
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/tci.c
13
--- a/target/sparc/helper.h
11
+++ b/tcg/tci.c
14
+++ b/target/sparc/helper.h
12
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
15
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_WG, f64, env, f64, f64)
13
tci_write_reg(regs, t0, *(int16_t *)(t1 + t2));
16
DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_WG, f64, env, f64, f64)
14
break;
17
DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
15
case INDEX_op_ld_i32:
18
DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_WG, f64, env, f64, f64)
16
+ CASE_64(ld32u)
19
-DEF_HELPER_FLAGS_5(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, i32)
17
t0 = *tb_ptr++;
20
+DEF_HELPER_FLAGS_6(fmaddd, TCG_CALL_NO_WG, f64, env, f64, f64, f64, s32, i32)
18
t1 = tci_read_r(regs, &tb_ptr);
21
DEF_HELPER_FLAGS_3(fnaddd, TCG_CALL_NO_WG, f64, env, f64, f64)
19
t2 = tci_read_s32(&tb_ptr);
22
DEF_HELPER_FLAGS_3(fnmuld, TCG_CALL_NO_WG, f64, env, f64, f64)
20
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
23
21
24
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_WG, f32, env, f32, f32)
22
/* Load/store operations (64 bit). */
25
DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_WG, f32, env, f32, f32)
23
26
DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
24
- case INDEX_op_ld32u_i64:
27
DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_WG, f32, env, f32, f32)
25
- t0 = *tb_ptr++;
28
-DEF_HELPER_FLAGS_5(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, i32)
26
- t1 = tci_read_r(regs, &tb_ptr);
29
+DEF_HELPER_FLAGS_6(fmadds, TCG_CALL_NO_WG, f32, env, f32, f32, f32, s32, i32)
27
- t2 = tci_read_s32(&tb_ptr);
30
DEF_HELPER_FLAGS_3(fnadds, TCG_CALL_NO_WG, f32, env, f32, f32)
28
- tci_write_reg(regs, t0, *(uint32_t *)(t1 + t2));
31
DEF_HELPER_FLAGS_3(fnmuls, TCG_CALL_NO_WG, f32, env, f32, f32)
29
- break;
32
30
case INDEX_op_ld32s_i64:
33
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
31
t0 = *tb_ptr++;
34
index XXXXXXX..XXXXXXX 100644
32
t1 = tci_read_r(regs, &tb_ptr);
35
--- a/target/sparc/fop_helper.c
36
+++ b/target/sparc/fop_helper.c
37
@@ -XXX,XX +XXX,XX @@ Int128 helper_fsqrtq(CPUSPARCState *env, Int128 src)
38
}
39
40
float32 helper_fmadds(CPUSPARCState *env, float32 s1,
41
- float32 s2, float32 s3, uint32_t op)
42
+ float32 s2, float32 s3, int32_t sc, uint32_t op)
43
{
44
- float32 ret = float32_muladd(s1, s2, s3, op, &env->fp_status);
45
+ float32 ret = float32_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
46
check_ieee_exceptions(env, GETPC());
47
return ret;
48
}
49
50
float64 helper_fmaddd(CPUSPARCState *env, float64 s1,
51
- float64 s2, float64 s3, uint32_t op)
52
+ float64 s2, float64 s3, int32_t sc, uint32_t op)
53
{
54
- float64 ret = float64_muladd(s1, s2, s3, op, &env->fp_status);
55
+ float64 ret = float64_muladd_scalbn(s1, s2, s3, sc, op, &env->fp_status);
56
check_ieee_exceptions(env, GETPC());
57
return ret;
58
}
59
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/sparc/translate.c
62
+++ b/target/sparc/translate.c
63
@@ -XXX,XX +XXX,XX @@ static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
64
65
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
66
{
67
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
68
+ TCGv_i32 z = tcg_constant_i32(0);
69
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
70
}
71
72
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
73
{
74
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
75
+ TCGv_i32 z = tcg_constant_i32(0);
76
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
77
}
78
79
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
80
{
81
- int op = float_muladd_negate_c;
82
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
83
+ TCGv_i32 z = tcg_constant_i32(0);
84
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
85
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
86
}
87
88
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
89
{
90
- int op = float_muladd_negate_c;
91
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
92
+ TCGv_i32 z = tcg_constant_i32(0);
93
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
94
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
95
}
96
97
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
98
{
99
- int op = float_muladd_negate_c | float_muladd_negate_result;
100
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
101
+ TCGv_i32 z = tcg_constant_i32(0);
102
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
103
+ float_muladd_negate_result);
104
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
105
}
106
107
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
108
{
109
- int op = float_muladd_negate_c | float_muladd_negate_result;
110
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
111
+ TCGv_i32 z = tcg_constant_i32(0);
112
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
113
+ float_muladd_negate_result);
114
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
115
}
116
117
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
118
{
119
- int op = float_muladd_negate_result;
120
- gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
121
+ TCGv_i32 z = tcg_constant_i32(0);
122
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
123
+ gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
124
}
125
126
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
127
{
128
- int op = float_muladd_negate_result;
129
- gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
130
+ TCGv_i32 z = tcg_constant_i32(0);
131
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
132
+ gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
133
}
134
135
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
136
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
137
{
138
- TCGv_i32 one = tcg_constant_i32(float32_one);
139
- int op = float_muladd_halve_result;
140
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
141
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
142
+ TCGv_i32 mone = tcg_constant_i32(-1);
143
+ TCGv_i32 op = tcg_constant_i32(0);
144
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
145
}
146
147
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
148
{
149
- TCGv_i64 one = tcg_constant_i64(float64_one);
150
- int op = float_muladd_halve_result;
151
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
152
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
153
+ TCGv_i32 mone = tcg_constant_i32(-1);
154
+ TCGv_i32 op = tcg_constant_i32(0);
155
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
156
}
157
158
/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
159
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
160
{
161
- TCGv_i32 one = tcg_constant_i32(float32_one);
162
- int op = float_muladd_negate_c | float_muladd_halve_result;
163
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
164
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
165
+ TCGv_i32 mone = tcg_constant_i32(-1);
166
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
167
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
168
}
169
170
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
171
{
172
- TCGv_i64 one = tcg_constant_i64(float64_one);
173
- int op = float_muladd_negate_c | float_muladd_halve_result;
174
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
175
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
176
+ TCGv_i32 mone = tcg_constant_i32(-1);
177
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
178
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
179
}
180
181
/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
182
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
183
{
184
- TCGv_i32 one = tcg_constant_i32(float32_one);
185
- int op = float_muladd_negate_result | float_muladd_halve_result;
186
- gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
187
+ TCGv_i32 fone = tcg_constant_i32(float32_one);
188
+ TCGv_i32 mone = tcg_constant_i32(-1);
189
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
190
+ gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
191
}
192
193
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
194
{
195
- TCGv_i64 one = tcg_constant_i64(float64_one);
196
- int op = float_muladd_negate_result | float_muladd_halve_result;
197
- gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
198
+ TCGv_i64 fone = tcg_constant_i64(float64_one);
199
+ TCGv_i32 mone = tcg_constant_i32(-1);
200
+ TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
201
+ gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
202
}
203
204
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
--
2.25.1

--
2.43.0
1
Eliminating a TODO for ld16s_i64.
1
All uses have been converted to float*_muladd_scalbn.
2
2
3
Tested-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/tci.c | 5 +----
6
include/fpu/softfloat.h | 3 ---
8
1 file changed, 1 insertion(+), 4 deletions(-)
7
fpu/softfloat.c | 6 ------
8
fpu/softfloat-parts.c.inc | 4 ----
9
3 files changed, 13 deletions(-)
9
10
10
diff --git a/tcg/tci.c b/tcg/tci.c
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/tci.c
13
--- a/include/fpu/softfloat.h
13
+++ b/tcg/tci.c
14
+++ b/include/fpu/softfloat.h
14
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
15
t2 = tci_read_s32(&tb_ptr);
16
| Using these differs from negating an input or output before calling
16
tci_write_reg(regs, t0, *(uint16_t *)(t1 + t2));
17
| the muladd function in that this means that a NaN doesn't have its
17
break;
18
| sign bit inverted before it is propagated.
18
- case INDEX_op_ld16s_i32:
19
-| We also support halving the result before rounding, as a special
19
+ CASE_32_64(ld16s)
20
-| case to support the ARM fused-sqrt-step instruction FRSQRTS.
20
t0 = *tb_ptr++;
21
*----------------------------------------------------------------------------*/
21
t1 = tci_read_r(regs, &tb_ptr);
22
enum {
22
t2 = tci_read_s32(&tb_ptr);
23
float_muladd_negate_c = 1,
23
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
24
float_muladd_negate_product = 2,
24
25
float_muladd_negate_result = 4,
25
/* Load/store operations (64 bit). */
26
- float_muladd_halve_result = 8,
26
27
};
27
- case INDEX_op_ld16s_i64:
28
28
- TODO();
29
/*----------------------------------------------------------------------------
29
- break;
30
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
30
case INDEX_op_ld32u_i64:
31
index XXXXXXX..XXXXXXX 100644
31
t0 = *tb_ptr++;
32
--- a/fpu/softfloat.c
32
t1 = tci_read_r(regs, &tb_ptr);
33
+++ b/fpu/softfloat.c
34
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
35
if (unlikely(!can_use_fpu(s))) {
36
goto soft;
37
}
38
- if (unlikely(flags & float_muladd_halve_result)) {
39
- goto soft;
40
- }
41
42
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
43
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
44
@@ -XXX,XX +XXX,XX @@ float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
45
if (unlikely(!can_use_fpu(s))) {
46
goto soft;
47
}
48
- if (unlikely(flags & float_muladd_halve_result)) {
49
- goto soft;
50
- }
51
52
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
53
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
54
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
55
index XXXXXXX..XXXXXXX 100644
56
--- a/fpu/softfloat-parts.c.inc
57
+++ b/fpu/softfloat-parts.c.inc
58
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
59
a->exp = p_widen.exp;
60
61
return_normal:
62
- /* TODO: Replace all use of float_muladd_halve_result with scale. */
63
- if (flags & float_muladd_halve_result) {
64
- a->exp -= 1;
65
- }
66
a->exp += scale;
67
finish_sign:
68
if (flags & float_muladd_negate_result) {
--
2.25.1

--
2.43.0
1
Eliminating a TODO for ld16u_i32.
1
This rounding mode is used by Hexagon.
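
As an illustrative sketch (not from the patch), assuming QEMU's
softfloat API: with this mode a result that would overflow saturates
at the largest finite value instead of returning an infinity.

    /* "fp_status" is a caller-provided float_status (hypothetical helper). */
    static float32 sat_mul(float32 a, float32 b, float_status *fp_status)
    {
        set_float_rounding_mode(float_round_nearest_even_max, fp_status);
        /* An overflowing result is now the largest finite float32, not +inf. */
        return float32_mul(a, b, fp_status);
    }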
2
2
3
Tested-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
4
---
7
tcg/tci.c | 13 +++++--------
5
include/fpu/softfloat-types.h | 2 ++
8
1 file changed, 5 insertions(+), 8 deletions(-)
6
fpu/softfloat-parts.c.inc | 3 +++
7
2 files changed, 5 insertions(+)
9
8
10
diff --git a/tcg/tci.c b/tcg/tci.c
9
diff --git a/include/fpu/softfloat-types.h b/include/fpu/softfloat-types.h
11
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/tci.c
11
--- a/include/fpu/softfloat-types.h
13
+++ b/tcg/tci.c
12
+++ b/include/fpu/softfloat-types.h
14
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
13
@@ -XXX,XX +XXX,XX @@ typedef enum __attribute__((__packed__)) {
15
t2 = tci_read_s32(&tb_ptr);
14
float_round_to_odd = 5,
16
tci_write_reg(regs, t0, *(int8_t *)(t1 + t2));
15
/* Not an IEEE rounding mode: round to closest odd, overflow to inf */
17
break;
16
float_round_to_odd_inf = 6,
18
- case INDEX_op_ld16u_i32:
17
+ /* Not an IEEE rounding mode: round to nearest even, overflow to max */
19
- TODO();
18
+ float_round_nearest_even_max = 7,
20
+ CASE_32_64(ld16u)
19
} FloatRoundMode;
21
+ t0 = *tb_ptr++;
20
22
+ t1 = tci_read_r(regs, &tb_ptr);
21
/*
23
+ t2 = tci_read_s32(&tb_ptr);
22
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
24
+ tci_write_reg(regs, t0, *(uint16_t *)(t1 + t2));
23
index XXXXXXX..XXXXXXX 100644
25
break;
24
--- a/fpu/softfloat-parts.c.inc
26
case INDEX_op_ld16s_i32:
25
+++ b/fpu/softfloat-parts.c.inc
27
t0 = *tb_ptr++;
26
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
28
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
27
int exp, flags = 0;
29
28
30
/* Load/store operations (64 bit). */
29
switch (s->float_rounding_mode) {
31
30
+ case float_round_nearest_even_max:
32
- case INDEX_op_ld16u_i64:
31
+ overflow_norm = true;
33
- t0 = *tb_ptr++;
32
+ /* fall through */
34
- t1 = tci_read_r(regs, &tb_ptr);
33
case float_round_nearest_even:
35
- t2 = tci_read_s32(&tb_ptr);
34
if (N > 64 && frac_lsb == 0) {
36
- tci_write_reg(regs, t0, *(uint16_t *)(t1 + t2));
35
inc = ((p->frac_hi & 1) || (p->frac_lo & round_mask) != frac_lsbm1
37
- break;
38
case INDEX_op_ld16s_i64:
39
TODO();
40
break;
--
2.25.1

--
2.43.0
1
Eliminating a TODO for ld8s_i32.
1
Certain Hexagon instructions suppress changes to the result
2
when the product of fma() is a true zero.
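
A hedged sketch of the semantics (the fragment and its variable names
are illustrative, not from the patch): when the product a * b is a
true zero, the addend is returned unchanged, sign bit included.

    /* Hypothetical fragment: x, c and fp_status are assumed to exist.
     * With c == -0.0, the default rules would fold (+0) + (-0) to +0;
     * the new flag returns c unchanged, still -0.0. */
    r = float32_muladd(float32_zero, x, c,
                       float_muladd_suppress_add_product_zero, &fp_status);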
2
3
3
Tested-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
tcg/tci.c | 13 +++++--------
6
include/fpu/softfloat.h | 5 +++++
8
1 file changed, 5 insertions(+), 8 deletions(-)
7
fpu/softfloat.c | 3 +++
8
fpu/softfloat-parts.c.inc | 4 +++-
9
3 files changed, 11 insertions(+), 1 deletion(-)
9
10
10
diff --git a/tcg/tci.c b/tcg/tci.c
11
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/tci.c
13
--- a/include/fpu/softfloat.h
13
+++ b/tcg/tci.c
14
+++ b/include/fpu/softfloat.h
14
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
15
@@ -XXX,XX +XXX,XX @@ bfloat16 bfloat16_squash_input_denormal(bfloat16 a, float_status *status);
15
t2 = tci_read_s32(&tb_ptr);
16
| Using these differs from negating an input or output before calling
16
tci_write_reg(regs, t0, *(uint8_t *)(t1 + t2));
17
| the muladd function in that this means that a NaN doesn't have its
17
break;
18
| sign bit inverted before it is propagated.
18
- case INDEX_op_ld8s_i32:
19
+|
19
- TODO();
20
+| With float_muladd_suppress_add_product_zero, if A or B is zero
20
+ CASE_32_64(ld8s)
21
+| such that the product is a true zero, then return C without addition.
21
+ t0 = *tb_ptr++;
22
+| This preserves the sign of C when C is +/- 0. Used for Hexagon.
22
+ t1 = tci_read_r(regs, &tb_ptr);
23
*----------------------------------------------------------------------------*/
23
+ t2 = tci_read_s32(&tb_ptr);
24
enum {
24
+ tci_write_reg(regs, t0, *(int8_t *)(t1 + t2));
25
float_muladd_negate_c = 1,
25
break;
26
float_muladd_negate_product = 2,
26
case INDEX_op_ld16u_i32:
27
float_muladd_negate_result = 4,
27
TODO();
28
+ float_muladd_suppress_add_product_zero = 8,
28
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
29
};
29
30
30
/* Load/store operations (64 bit). */
31
/*----------------------------------------------------------------------------
31
32
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
32
- case INDEX_op_ld8s_i64:
33
index XXXXXXX..XXXXXXX 100644
33
- t0 = *tb_ptr++;
34
--- a/fpu/softfloat.c
34
- t1 = tci_read_r(regs, &tb_ptr);
35
+++ b/fpu/softfloat.c
35
- t2 = tci_read_s32(&tb_ptr);
36
@@ -XXX,XX +XXX,XX @@ float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
36
- tci_write_reg(regs, t0, *(int8_t *)(t1 + t2));
37
if (unlikely(!can_use_fpu(s))) {
37
- break;
38
goto soft;
38
case INDEX_op_ld16u_i64:
39
}
39
t0 = *tb_ptr++;
40
+ if (unlikely(flags & float_muladd_suppress_add_product_zero)) {
40
t1 = tci_read_r(regs, &tb_ptr);
41
+ goto soft;
42
+ }
43
44
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
45
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
46
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
47
index XXXXXXX..XXXXXXX 100644
48
--- a/fpu/softfloat-parts.c.inc
49
+++ b/fpu/softfloat-parts.c.inc
50
@@ -XXX,XX +XXX,XX @@ static FloatPartsN *partsN(muladd_scalbn)(FloatPartsN *a, FloatPartsN *b,
51
goto return_normal;
52
}
53
if (c->cls == float_class_zero) {
54
- if (a->sign != c->sign) {
55
+ if (flags & float_muladd_suppress_add_product_zero) {
56
+ a->sign = c->sign;
57
+ } else if (a->sign != c->sign) {
58
goto return_sub_zero;
59
}
60
goto return_zero;
--
2.25.1

--
2.43.0
1
From: Claudio Fontana <cfontana@suse.de>
1
There are no special cases for this instruction.
2
Remove internal_mpyf as unused.
2
3
3
commit 568496c0c0f1 ("cpu: Add callback to check architectural") and
4
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
commit 3826121d9298 ("target-arm: Implement checking of fired")
5
introduced an ARM-specific hack for cpu_check_watchpoint.
6
7
Make debug_check_watchpoint optional, and move it to tcg_ops.
8
9
Signed-off-by: Claudio Fontana <cfontana@suse.de>
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
Message-Id: <20210204163931.7358-15-cfontana@suse.de>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
6
---
14
include/hw/core/cpu.h | 9 ++++++---
7
target/hexagon/fma_emu.h | 1 -
15
accel/tcg/user-exec.c | 3 ++-
8
target/hexagon/fma_emu.c | 8 --------
16
hw/core/cpu.c | 9 ---------
9
target/hexagon/op_helper.c | 2 +-
17
softmmu/physmem.c | 4 ++--
10
3 files changed, 1 insertion(+), 10 deletions(-)
18
target/arm/cpu.c | 4 ++--
19
5 files changed, 12 insertions(+), 17 deletions(-)
20
11
21
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
12
diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
22
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
23
--- a/include/hw/core/cpu.h
14
--- a/target/hexagon/fma_emu.h
24
+++ b/include/hw/core/cpu.h
15
+++ b/target/hexagon/fma_emu.h
25
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
16
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32);
26
*/
17
float32 infinite_float32(uint8_t sign);
27
vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
18
float32 internal_fmafx(float32 a, float32 b, float32 c,
28
19
int scale, float_status *fp_status);
29
+ /**
20
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status);
30
+ * @debug_check_watchpoint: return true if the architectural
21
float64 internal_mpyhh(float64 a, float64 b,
31
+ * watchpoint whose address has matched should really fire, used by ARM
22
unsigned long long int accumulated,
32
+ */
23
float_status *fp_status);
33
+ bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
24
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
34
+
35
} TcgCpuOperations;
36
37
/**
38
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
39
* a memory access with the specified memory transaction attributes.
40
* @gdb_read_register: Callback for letting GDB read a register.
41
* @gdb_write_register: Callback for letting GDB write a register.
42
- * @debug_check_watchpoint: Callback: return true if the architectural
43
- * watchpoint whose address has matched should really fire.
44
* @write_elf64_note: Callback for writing a CPU-specific ELF note to a
45
* 64-bit VM coredump.
46
* @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
47
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
48
int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
49
int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
50
int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
51
- bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
52
53
int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
54
int cpuid, void *opaque);
55
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
56
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
57
--- a/accel/tcg/user-exec.c
26
--- a/target/hexagon/fma_emu.c
58
+++ b/accel/tcg/user-exec.c
27
+++ b/target/hexagon/fma_emu.c
59
@@ -XXX,XX +XXX,XX @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
28
@@ -XXX,XX +XXX,XX @@ float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
60
clear_helper_retaddr();
29
return accum_round_float32(result, fp_status);
61
62
cc = CPU_GET_CLASS(cpu);
63
- cc->tcg_ops.tlb_fill(cpu, address, 0, access_type, MMU_USER_IDX, false, pc);
64
+ cc->tcg_ops.tlb_fill(cpu, address, 0, access_type,
65
+ MMU_USER_IDX, false, pc);
66
g_assert_not_reached();
67
}
30
}
68
31
69
diff --git a/hw/core/cpu.c b/hw/core/cpu.c
32
-float32 internal_mpyf(float32 a, float32 b, float_status *fp_status)
70
index XXXXXXX..XXXXXXX 100644
71
--- a/hw/core/cpu.c
72
+++ b/hw/core/cpu.c
73
@@ -XXX,XX +XXX,XX @@ static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
74
return 0;
75
}
76
77
-static bool cpu_common_debug_check_watchpoint(CPUState *cpu, CPUWatchpoint *wp)
78
-{
33
-{
79
- /* If no extra check is required, QEMU watchpoint match can be considered
34
- if (float32_is_zero(a) || float32_is_zero(b)) {
80
- * as an architectural match.
35
- return float32_mul(a, b, fp_status);
81
- */
36
- }
82
- return true;
37
- return internal_fmafx(a, b, float32_zero, 0, fp_status);
83
-}
38
-}
84
-
39
-
85
static bool cpu_common_virtio_is_big_endian(CPUState *cpu)
40
float64 internal_mpyhh(float64 a, float64 b,
41
unsigned long long int accumulated,
42
float_status *fp_status)
43
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/hexagon/op_helper.c
46
+++ b/target/hexagon/op_helper.c
47
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sfmpy)(CPUHexagonState *env, float32 RsV, float32 RtV)
86
{
48
{
87
return target_words_bigendian();
49
float32 RdV;
88
@@ -XXX,XX +XXX,XX @@ static void cpu_class_init(ObjectClass *klass, void *data)
50
arch_fpop_start(env);
89
k->gdb_read_register = cpu_common_gdb_read_register;
51
- RdV = internal_mpyf(RsV, RtV, &env->fp_status);
90
k->gdb_write_register = cpu_common_gdb_write_register;
52
+ RdV = float32_mul(RsV, RtV, &env->fp_status);
91
k->virtio_is_big_endian = cpu_common_virtio_is_big_endian;
53
arch_fpop_end(env);
92
- k->debug_check_watchpoint = cpu_common_debug_check_watchpoint;
54
return RdV;
93
set_bit(DEVICE_CATEGORY_CPU, dc->categories);
94
dc->realize = cpu_common_realizefn;
95
dc->unrealize = cpu_common_unrealizefn;
96
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
97
index XXXXXXX..XXXXXXX 100644
98
--- a/softmmu/physmem.c
99
+++ b/softmmu/physmem.c
100
@@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
101
wp->hitaddr = MAX(addr, wp->vaddr);
102
wp->hitattrs = attrs;
103
if (!cpu->watchpoint_hit) {
104
- if (wp->flags & BP_CPU &&
105
- !cc->debug_check_watchpoint(cpu, wp)) {
106
+ if (wp->flags & BP_CPU && cc->tcg_ops.debug_check_watchpoint &&
107
+ !cc->tcg_ops.debug_check_watchpoint(cpu, wp)) {
108
wp->flags &= ~BP_WATCHPOINT_HIT;
109
continue;
110
}
111
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
112
index XXXXXXX..XXXXXXX 100644
113
--- a/target/arm/cpu.c
114
+++ b/target/arm/cpu.c
115
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
116
cc->tcg_ops.synchronize_from_tb = arm_cpu_synchronize_from_tb;
117
cc->tcg_ops.tlb_fill = arm_cpu_tlb_fill;
118
cc->tcg_ops.debug_excp_handler = arm_debug_excp_handler;
119
- cc->debug_check_watchpoint = arm_debug_check_watchpoint;
120
#if !defined(CONFIG_USER_ONLY)
121
+ cc->tcg_ops.do_interrupt = arm_cpu_do_interrupt;
122
cc->tcg_ops.do_transaction_failed = arm_cpu_do_transaction_failed;
123
cc->tcg_ops.do_unaligned_access = arm_cpu_do_unaligned_access;
124
cc->tcg_ops.adjust_watchpoint_address = arm_adjust_watchpoint_address;
125
- cc->tcg_ops.do_interrupt = arm_cpu_do_interrupt;
126
+ cc->tcg_ops.debug_check_watchpoint = arm_debug_check_watchpoint;
127
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
128
#endif /* CONFIG_TCG */
129
}
55
}
130
--
2.25.1

--
2.43.0
1
Tested-by: Alex Bennée <alex.bennee@linaro.org>
1
There are no special cases for this instruction.
2
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
2
3
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
---
5
---
5
tcg/tci.c | 20 +++++++++++++-------
6
target/hexagon/op_helper.c | 2 +-
6
1 file changed, 13 insertions(+), 7 deletions(-)
7
1 file changed, 1 insertion(+), 1 deletion(-)
7
8
8
diff --git a/tcg/tci.c b/tcg/tci.c
9
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
9
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
10
--- a/tcg/tci.c
11
--- a/target/hexagon/op_helper.c
11
+++ b/tcg/tci.c
12
+++ b/target/hexagon/op_helper.c
12
@@ -XXX,XX +XXX,XX @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
13
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
13
# define qemu_st_beq(X) stq_be_p(g2h(taddr), X)
14
float32 RsV, float32 RtV)
14
#endif
15
{
15
16
arch_fpop_start(env);
16
+#if TCG_TARGET_REG_BITS == 64
17
- RxV = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
17
+# define CASE_32_64(x) \
18
+ RxV = float32_muladd(RsV, RtV, RxV, 0, &env->fp_status);
18
+ case glue(glue(INDEX_op_, x), _i64): \
19
arch_fpop_end(env);
19
+ case glue(glue(INDEX_op_, x), _i32):
20
return RxV;
20
+# define CASE_64(x) \
21
}
21
+ case glue(glue(INDEX_op_, x), _i64):
22
+#else
23
+# define CASE_32_64(x) \
24
+ case glue(glue(INDEX_op_, x), _i32):
25
+# define CASE_64(x)
26
+#endif
27
+
28
/* Interpret pseudo code in tb. */
29
/*
30
* Disable CFI checks.
31
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
32
33
/* Load/store operations (32 bit). */
34
35
- case INDEX_op_ld8u_i32:
36
+ CASE_32_64(ld8u)
37
t0 = *tb_ptr++;
38
t1 = tci_read_r(regs, &tb_ptr);
39
t2 = tci_read_s32(&tb_ptr);
40
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
41
42
/* Load/store operations (64 bit). */
43
44
- case INDEX_op_ld8u_i64:
45
- t0 = *tb_ptr++;
46
- t1 = tci_read_r(regs, &tb_ptr);
47
- t2 = tci_read_s32(&tb_ptr);
48
- tci_write_reg(regs, t0, *(uint8_t *)(t1 + t2));
49
- break;
50
case INDEX_op_ld8s_i64:
51
t0 = *tb_ptr++;
52
t1 = tci_read_r(regs, &tb_ptr);
53
--
2.25.1

--
2.43.0
1
Note that we had two functions of the same name: a 32-bit version
1
There are no special cases for this instruction. Since Hexagon
2
which took two register numbers and a 64-bit version which was a
2
always uses default-nan mode, explicitly negating the first
3
no-op wrapper for tcg_write_reg. After this, we are left with
3
input is unnecessary. Use float_muladd_negate_product instead.
4
only the 32-bit version.
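
For the Hexagon change above, a sketch of the equivalence being relied
on (not part of the patch itself): in default-NaN mode, negating one
input by hand rounds identically to negating the product inside the
fused operation.

    /* Old: neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
     *      ... internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
     * New, same result under default-NaN: */
    RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
                         &env->fp_status);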
5
4
6
Tested-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
7
---
10
tcg/tci.c | 60 +++++++++++++++++++++++++------------------------------
8
target/hexagon/op_helper.c | 5 ++---
11
1 file changed, 27 insertions(+), 33 deletions(-)
9
1 file changed, 2 insertions(+), 3 deletions(-)
12
10
13
diff --git a/tcg/tci.c b/tcg/tci.c
11
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
14
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
15
--- a/tcg/tci.c
13
--- a/target/hexagon/op_helper.c
16
+++ b/tcg/tci.c
14
+++ b/target/hexagon/op_helper.c
17
@@ -XXX,XX +XXX,XX @@ static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
15
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
18
tci_write_reg(regs, low_index, value);
16
float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
19
tci_write_reg(regs, high_index, value >> 32);
17
float32 RsV, float32 RtV)
18
{
19
- float32 neg_RsV;
20
arch_fpop_start(env);
21
- neg_RsV = float32_set_sign(RsV, float32_is_neg(RsV) ? 0 : 1);
22
- RxV = internal_fmafx(neg_RsV, RtV, RxV, 0, &env->fp_status);
23
+ RxV = float32_muladd(RsV, RtV, RxV, float_muladd_negate_product,
24
+ &env->fp_status);
25
arch_fpop_end(env);
26
return RxV;
20
}
27
}
21
-#elif TCG_TARGET_REG_BITS == 64
22
-static void
23
-tci_write_reg64(tcg_target_ulong *regs, TCGReg index, uint64_t value)
24
-{
25
- tci_write_reg(regs, index, value);
26
-}
27
#endif
28
29
#if TCG_TARGET_REG_BITS == 32
30
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
31
t1 = tci_read_r64(regs, &tb_ptr);
32
t2 = tci_read_ri64(regs, &tb_ptr);
33
condition = *tb_ptr++;
34
- tci_write_reg64(regs, t0, tci_compare64(t1, t2, condition));
35
+ tci_write_reg(regs, t0, tci_compare64(t1, t2, condition));
36
break;
37
#endif
38
case INDEX_op_mov_i32:
39
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
40
case INDEX_op_mov_i64:
41
t0 = *tb_ptr++;
42
t1 = tci_read_r64(regs, &tb_ptr);
43
- tci_write_reg64(regs, t0, t1);
44
+ tci_write_reg(regs, t0, t1);
45
break;
46
case INDEX_op_tci_movi_i64:
47
t0 = *tb_ptr++;
48
t1 = tci_read_i64(&tb_ptr);
49
- tci_write_reg64(regs, t0, t1);
50
+ tci_write_reg(regs, t0, t1);
51
break;
52
53
/* Load/store operations (64 bit). */
54
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
55
t0 = *tb_ptr++;
56
t1 = tci_read_r(regs, &tb_ptr);
57
t2 = tci_read_s32(&tb_ptr);
58
- tci_write_reg64(regs, t0, *(uint64_t *)(t1 + t2));
59
+ tci_write_reg(regs, t0, *(uint64_t *)(t1 + t2));
60
break;
61
case INDEX_op_st8_i64:
62
t0 = tci_read_r8(regs, &tb_ptr);
63
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
64
t0 = *tb_ptr++;
65
t1 = tci_read_ri64(regs, &tb_ptr);
66
t2 = tci_read_ri64(regs, &tb_ptr);
67
- tci_write_reg64(regs, t0, t1 + t2);
68
+ tci_write_reg(regs, t0, t1 + t2);
69
break;
70
case INDEX_op_sub_i64:
71
t0 = *tb_ptr++;
72
t1 = tci_read_ri64(regs, &tb_ptr);
73
t2 = tci_read_ri64(regs, &tb_ptr);
74
- tci_write_reg64(regs, t0, t1 - t2);
75
+ tci_write_reg(regs, t0, t1 - t2);
76
break;
77
case INDEX_op_mul_i64:
78
t0 = *tb_ptr++;
79
t1 = tci_read_ri64(regs, &tb_ptr);
80
t2 = tci_read_ri64(regs, &tb_ptr);
81
- tci_write_reg64(regs, t0, t1 * t2);
82
+ tci_write_reg(regs, t0, t1 * t2);
83
break;
84
#if TCG_TARGET_HAS_div_i64
85
case INDEX_op_div_i64:
86
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
87
t0 = *tb_ptr++;
88
t1 = tci_read_ri64(regs, &tb_ptr);
89
t2 = tci_read_ri64(regs, &tb_ptr);
90
- tci_write_reg64(regs, t0, t1 & t2);
91
+ tci_write_reg(regs, t0, t1 & t2);
92
break;
93
case INDEX_op_or_i64:
94
t0 = *tb_ptr++;
95
t1 = tci_read_ri64(regs, &tb_ptr);
96
t2 = tci_read_ri64(regs, &tb_ptr);
97
- tci_write_reg64(regs, t0, t1 | t2);
98
+ tci_write_reg(regs, t0, t1 | t2);
99
break;
100
case INDEX_op_xor_i64:
101
t0 = *tb_ptr++;
102
t1 = tci_read_ri64(regs, &tb_ptr);
103
t2 = tci_read_ri64(regs, &tb_ptr);
104
- tci_write_reg64(regs, t0, t1 ^ t2);
105
+ tci_write_reg(regs, t0, t1 ^ t2);
106
break;
107
108
/* Shift/rotate operations (64 bit). */
109
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
110
t0 = *tb_ptr++;
111
t1 = tci_read_ri64(regs, &tb_ptr);
112
t2 = tci_read_ri64(regs, &tb_ptr);
113
- tci_write_reg64(regs, t0, t1 << (t2 & 63));
114
+ tci_write_reg(regs, t0, t1 << (t2 & 63));
115
break;
116
case INDEX_op_shr_i64:
117
t0 = *tb_ptr++;
118
t1 = tci_read_ri64(regs, &tb_ptr);
119
t2 = tci_read_ri64(regs, &tb_ptr);
120
- tci_write_reg64(regs, t0, t1 >> (t2 & 63));
121
+ tci_write_reg(regs, t0, t1 >> (t2 & 63));
122
break;
123
case INDEX_op_sar_i64:
124
t0 = *tb_ptr++;
125
t1 = tci_read_ri64(regs, &tb_ptr);
126
t2 = tci_read_ri64(regs, &tb_ptr);
127
- tci_write_reg64(regs, t0, ((int64_t)t1 >> (t2 & 63)));
128
+ tci_write_reg(regs, t0, ((int64_t)t1 >> (t2 & 63)));
129
break;
130
#if TCG_TARGET_HAS_rot_i64
131
case INDEX_op_rotl_i64:
132
t0 = *tb_ptr++;
133
t1 = tci_read_ri64(regs, &tb_ptr);
134
t2 = tci_read_ri64(regs, &tb_ptr);
135
- tci_write_reg64(regs, t0, rol64(t1, t2 & 63));
136
+ tci_write_reg(regs, t0, rol64(t1, t2 & 63));
137
break;
138
case INDEX_op_rotr_i64:
139
t0 = *tb_ptr++;
140
t1 = tci_read_ri64(regs, &tb_ptr);
141
t2 = tci_read_ri64(regs, &tb_ptr);
142
- tci_write_reg64(regs, t0, ror64(t1, t2 & 63));
143
+ tci_write_reg(regs, t0, ror64(t1, t2 & 63));
144
break;
145
#endif
146
#if TCG_TARGET_HAS_deposit_i64
147
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
148
tmp16 = *tb_ptr++;
149
tmp8 = *tb_ptr++;
150
tmp64 = (((1ULL << tmp8) - 1) << tmp16);
151
- tci_write_reg64(regs, t0, (t1 & ~tmp64) | ((t2 << tmp16) & tmp64));
152
+ tci_write_reg(regs, t0, (t1 & ~tmp64) | ((t2 << tmp16) & tmp64));
153
break;
154
#endif
155
case INDEX_op_brcond_i64:
156
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
157
case INDEX_op_ext8u_i64:
158
t0 = *tb_ptr++;
159
t1 = tci_read_r8(regs, &tb_ptr);
160
- tci_write_reg64(regs, t0, t1);
161
+ tci_write_reg(regs, t0, t1);
162
break;
163
#endif
164
#if TCG_TARGET_HAS_ext8s_i64
165
case INDEX_op_ext8s_i64:
166
t0 = *tb_ptr++;
167
t1 = tci_read_r8s(regs, &tb_ptr);
168
- tci_write_reg64(regs, t0, t1);
169
+ tci_write_reg(regs, t0, t1);
170
break;
171
#endif
172
#if TCG_TARGET_HAS_ext16s_i64
173
case INDEX_op_ext16s_i64:
174
t0 = *tb_ptr++;
175
t1 = tci_read_r16s(regs, &tb_ptr);
176
- tci_write_reg64(regs, t0, t1);
177
+ tci_write_reg(regs, t0, t1);
178
break;
179
#endif
180
#if TCG_TARGET_HAS_ext16u_i64
181
case INDEX_op_ext16u_i64:
182
t0 = *tb_ptr++;
183
t1 = tci_read_r16(regs, &tb_ptr);
184
- tci_write_reg64(regs, t0, t1);
185
+ tci_write_reg(regs, t0, t1);
186
break;
187
#endif
188
#if TCG_TARGET_HAS_ext32s_i64
189
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
190
case INDEX_op_ext_i32_i64:
191
t0 = *tb_ptr++;
192
t1 = tci_read_r32s(regs, &tb_ptr);
193
- tci_write_reg64(regs, t0, t1);
194
+ tci_write_reg(regs, t0, t1);
195
break;
196
#if TCG_TARGET_HAS_ext32u_i64
197
case INDEX_op_ext32u_i64:
198
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
199
case INDEX_op_extu_i32_i64:
200
t0 = *tb_ptr++;
201
t1 = tci_read_r32(regs, &tb_ptr);
202
- tci_write_reg64(regs, t0, t1);
203
+ tci_write_reg(regs, t0, t1);
204
break;
205
#if TCG_TARGET_HAS_bswap16_i64
206
case INDEX_op_bswap16_i64:
207
t0 = *tb_ptr++;
208
t1 = tci_read_r16(regs, &tb_ptr);
209
- tci_write_reg64(regs, t0, bswap16(t1));
210
+ tci_write_reg(regs, t0, bswap16(t1));
211
break;
212
#endif
213
#if TCG_TARGET_HAS_bswap32_i64
214
case INDEX_op_bswap32_i64:
215
t0 = *tb_ptr++;
216
t1 = tci_read_r32(regs, &tb_ptr);
217
- tci_write_reg64(regs, t0, bswap32(t1));
218
+ tci_write_reg(regs, t0, bswap32(t1));
219
break;
220
#endif
221
#if TCG_TARGET_HAS_bswap64_i64
222
case INDEX_op_bswap64_i64:
223
t0 = *tb_ptr++;
224
t1 = tci_read_r64(regs, &tb_ptr);
225
- tci_write_reg64(regs, t0, bswap64(t1));
226
+ tci_write_reg(regs, t0, bswap64(t1));
227
break;
228
#endif
229
#if TCG_TARGET_HAS_not_i64
230
case INDEX_op_not_i64:
231
t0 = *tb_ptr++;
232
t1 = tci_read_r64(regs, &tb_ptr);
233
- tci_write_reg64(regs, t0, ~t1);
234
+ tci_write_reg(regs, t0, ~t1);
235
break;
236
#endif
237
#if TCG_TARGET_HAS_neg_i64
238
case INDEX_op_neg_i64:
239
t0 = *tb_ptr++;
240
t1 = tci_read_r64(regs, &tb_ptr);
241
- tci_write_reg64(regs, t0, -t1);
242
+ tci_write_reg(regs, t0, -t1);
243
break;
244
#endif
245
#endif /* TCG_TARGET_REG_BITS == 64 */
246
--
2.25.1

--
2.43.0
1
From: Stefan Weil <sw@weilnetz.de>
1
This instruction has a special case that 0 * x + c returns c
2
without the normal sign folding that comes with 0 + -0.
3
Use the new float_muladd_suppress_add_product_zero to
4
describe this.
2
5
3
That TCG opcode is used by debian-buster (arm64) running ffmpeg:
6
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
5
qemu-aarch64 /usr/bin/ffmpeg -i theora.mkv theora.webm
6
7
Tested-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Reported-by: Alex Bennée <alex.bennee@linaro.org>
10
Signed-off-by: Stefan Weil <sw@weilnetz.de>
11
Message-Id: <20210128020425.2055454-1-sw@weilnetz.de>
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
8
---
14
tcg/tci.c | 5 ++++-
9
target/hexagon/op_helper.c | 11 +++--------
15
1 file changed, 4 insertions(+), 1 deletion(-)
10
1 file changed, 3 insertions(+), 8 deletions(-)
16
11
17
diff --git a/tcg/tci.c b/tcg/tci.c
12
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
18
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
19
--- a/tcg/tci.c
14
--- a/target/hexagon/op_helper.c
20
+++ b/tcg/tci.c
15
+++ b/target/hexagon/op_helper.c
21
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
16
@@ -XXX,XX +XXX,XX @@ static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
22
tci_write_reg8(regs, t0, *(uint8_t *)(t1 + t2));
17
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
23
break;
18
float32 RsV, float32 RtV, float32 PuV)
24
case INDEX_op_ld8s_i64:
19
{
25
- TODO();
20
- size4s_t tmp;
26
+ t0 = *tb_ptr++;
21
arch_fpop_start(env);
27
+ t1 = tci_read_r(regs, &tb_ptr);
22
- RxV = check_nan(RxV, RxV, &env->fp_status);
28
+ t2 = tci_read_s32(&tb_ptr);
23
- RxV = check_nan(RxV, RsV, &env->fp_status);
29
+ tci_write_reg(regs, t0, *(int8_t *)(t1 + t2));
24
- RxV = check_nan(RxV, RtV, &env->fp_status);
30
break;
25
- tmp = internal_fmafx(RsV, RtV, RxV, fSXTN(8, 64, PuV), &env->fp_status);
31
case INDEX_op_ld16u_i64:
26
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
32
t0 = *tb_ptr++;
27
- RxV = tmp;
28
- }
29
+ RxV = float32_muladd_scalbn(RsV, RtV, RxV, fSXTN(8, 64, PuV),
30
+ float_muladd_suppress_add_product_zero,
31
+ &env->fp_status);
32
arch_fpop_end(env);
33
return RxV;
34
}
--
2.25.1

--
2.43.0
1
From: Claudio Fontana <cfontana@suse.de>
1
There are multiple special cases for this instruction.
2
(1) The saturate to normal maximum instead of overflow to infinity is
3
handled by the new float_round_nearest_even_max rounding mode.
4
(2) The 0 * n + c special case is handled by the new
5
float_muladd_suppress_add_product_zero flag.
6
(3) The Inf - Inf -> 0 special case can be detected after the fact
7
by examining float_flag_invalid_isi.
2
8
3
make it consistently SOFTMMU-only.
9
Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
4
5
Signed-off-by: Claudio Fontana <cfontana@suse.de>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
10
[claudio: make the field presence in cpu.h unconditional, removing the ifdefs]
11
Message-Id: <20210204163931.7358-12-cfontana@suse.de>
12
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
11
---
15
include/hw/core/cpu.h | 13 +++++++------
12
target/hexagon/op_helper.c | 105 +++++++++----------------------------
16
target/alpha/cpu.c | 2 +-
13
1 file changed, 26 insertions(+), 79 deletions(-)
17
target/arm/cpu.c | 2 +-
18
target/hppa/cpu.c | 4 +++-
19
target/microblaze/cpu.c | 2 +-
20
target/mips/cpu.c | 3 ++-
21
target/nios2/cpu.c | 2 +-
22
target/riscv/cpu.c | 2 +-
23
target/s390x/cpu.c | 2 +-
24
target/s390x/excp_helper.c | 2 +-
25
target/sh4/cpu.c | 2 +-
26
target/sparc/cpu.c | 2 +-
27
target/xtensa/cpu.c | 2 +-
28
target/ppc/translate_init.c.inc | 2 +-
29
14 files changed, 23 insertions(+), 19 deletions(-)
30
14
31
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
15
diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c
32
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
33
--- a/include/hw/core/cpu.h
17
--- a/target/hexagon/op_helper.c
34
+++ b/include/hw/core/cpu.h
18
+++ b/target/hexagon/op_helper.c
35
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
19
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffma)(CPUHexagonState *env, float32 RxV,
36
unsigned size, MMUAccessType access_type,
20
return RxV;
37
int mmu_idx, MemTxAttrs attrs,
21
}
38
MemTxResult response, uintptr_t retaddr);
22
39
+ /**
23
-static bool is_zero_prod(float32 a, float32 b)
40
+ * @do_unaligned_access: Callback for unaligned access handling
24
-{
41
+ */
25
- return ((float32_is_zero(a) && is_finite(b)) ||
42
+ void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
26
- (float32_is_zero(b) && is_finite(a)));
43
+ MMUAccessType access_type,
27
-}
44
+ int mmu_idx, uintptr_t retaddr);
28
-
45
} TcgCpuOperations;
29
-static float32 check_nan(float32 dst, float32 x, float_status *fp_status)
46
30
-{
47
/**
31
- float32 ret = dst;
48
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
32
- if (float32_is_any_nan(x)) {
49
* @parse_features: Callback to parse command line arguments.
33
- if (extract32(x, 22, 1) == 0) {
50
* @reset_dump_flags: #CPUDumpFlags to use for reset logging.
34
- float_raise(float_flag_invalid, fp_status);
51
* @has_work: Callback for checking if there is work to do.
35
- }
52
- * @do_unaligned_access: Callback for unaligned access handling, if
36
- ret = make_float32(0xffffffff); /* nan */
53
- * the target defines #TARGET_ALIGNED_ONLY.
37
- }
54
* @virtio_is_big_endian: Callback to return %true if a CPU which supports
38
- return ret;
55
* runtime configurable endianness is currently big-endian. Non-configurable
39
-}
56
* CPUs can use the default implementation of this method. This method should
40
-
57
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
41
float32 HELPER(sffma_sc)(CPUHexagonState *env, float32 RxV,
58
42
float32 RsV, float32 RtV, float32 PuV)
59
int reset_dump_flags;
60
bool (*has_work)(CPUState *cpu);
61
- void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
62
- MMUAccessType access_type,
63
- int mmu_idx, uintptr_t retaddr);
64
bool (*virtio_is_big_endian)(CPUState *cpu);
65
int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
66
uint8_t *buf, int len, bool is_write);
67
@@ -XXX,XX +XXX,XX @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
68
{
43
{
69
CPUClass *cc = CPU_GET_CLASS(cpu);
44
@@ -XXX,XX +XXX,XX @@ float32 HELPER(sffms)(CPUHexagonState *env, float32 RxV,
70
45
return RxV;
71
- cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
72
+ cc->tcg_ops.do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
73
}
46
}
74
47
75
static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
48
-static bool is_inf_prod(int32_t a, int32_t b)
76
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
49
+static float32 do_sffma_lib(CPUHexagonState *env, float32 RxV,
77
index XXXXXXX..XXXXXXX 100644
50
+ float32 RsV, float32 RtV, int negate)
78
--- a/target/alpha/cpu.c
51
{
79
+++ b/target/alpha/cpu.c
52
- return (float32_is_infinity(a) && float32_is_infinity(b)) ||
80
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
53
- (float32_is_infinity(a) && is_finite(b) && !float32_is_zero(b)) ||
81
cc->tcg_ops.tlb_fill = alpha_cpu_tlb_fill;
54
- (float32_is_infinity(b) && is_finite(a) && !float32_is_zero(a));
82
#ifndef CONFIG_USER_ONLY
55
+ int flags;
83
cc->tcg_ops.do_transaction_failed = alpha_cpu_do_transaction_failed;
56
+
84
- cc->do_unaligned_access = alpha_cpu_do_unaligned_access;
57
+ arch_fpop_start(env);
85
+ cc->tcg_ops.do_unaligned_access = alpha_cpu_do_unaligned_access;
58
+
86
cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug;
59
+ set_float_rounding_mode(float_round_nearest_even_max, &env->fp_status);
87
dc->vmsd = &vmstate_alpha_cpu;
60
+ RxV = float32_muladd(RsV, RtV, RxV,
88
#endif
61
+ negate | float_muladd_suppress_add_product_zero,
89
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
62
+ &env->fp_status);
90
index XXXXXXX..XXXXXXX 100644
63
+
91
--- a/target/arm/cpu.c
64
+ flags = get_float_exception_flags(&env->fp_status);
92
+++ b/target/arm/cpu.c
65
+ if (flags) {
93
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
66
+ /* Flags are suppressed by this instruction. */
94
cc->tcg_ops.tlb_fill = arm_cpu_tlb_fill;
67
+ set_float_exception_flags(0, &env->fp_status);
95
cc->tcg_ops.debug_excp_handler = arm_debug_excp_handler;
68
+
96
cc->debug_check_watchpoint = arm_debug_check_watchpoint;
69
+ /* Return 0 for Inf - Inf. */
97
- cc->do_unaligned_access = arm_cpu_do_unaligned_access;
70
+ if (flags & float_flag_invalid_isi) {
98
#if !defined(CONFIG_USER_ONLY)
71
+ RxV = 0;
99
cc->tcg_ops.do_transaction_failed = arm_cpu_do_transaction_failed;
72
+ }
100
+ cc->tcg_ops.do_unaligned_access = arm_cpu_do_unaligned_access;
73
+ }
101
cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
74
+
102
cc->tcg_ops.do_interrupt = arm_cpu_do_interrupt;
75
+ arch_fpop_end(env);
103
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
76
+ return RxV;
104
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
105
index XXXXXXX..XXXXXXX 100644
106
--- a/target/hppa/cpu.c
107
+++ b/target/hppa/cpu.c
108
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_disas_set_info(CPUState *cs, disassemble_info *info)
109
info->print_insn = print_insn_hppa;
110
}
77
}
111
78
112
+#ifndef CONFIG_USER_ONLY
79
float32 HELPER(sffma_lib)(CPUHexagonState *env, float32 RxV,
113
static void hppa_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
80
float32 RsV, float32 RtV)
114
MMUAccessType access_type,
81
{
115
int mmu_idx, uintptr_t retaddr)
82
- bool infinp;
116
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
83
- bool infminusinf;
117
84
- float32 tmp;
118
cpu_loop_exit_restore(cs, retaddr);
85
-
86
- arch_fpop_start(env);
87
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
88
- infminusinf = float32_is_infinity(RxV) &&
89
- is_inf_prod(RsV, RtV) &&
90
- (fGETBIT(31, RsV ^ RxV ^ RtV) != 0);
91
- infinp = float32_is_infinity(RxV) ||
92
- float32_is_infinity(RtV) ||
93
- float32_is_infinity(RsV);
94
- RxV = check_nan(RxV, RxV, &env->fp_status);
95
- RxV = check_nan(RxV, RsV, &env->fp_status);
96
- RxV = check_nan(RxV, RtV, &env->fp_status);
97
- tmp = internal_fmafx(RsV, RtV, RxV, 0, &env->fp_status);
98
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
99
- RxV = tmp;
100
- }
101
- set_float_exception_flags(0, &env->fp_status);
102
- if (float32_is_infinity(RxV) && !infinp) {
103
- RxV = RxV - 1;
104
- }
105
- if (infminusinf) {
106
- RxV = 0;
107
- }
108
- arch_fpop_end(env);
109
- return RxV;
110
+ return do_sffma_lib(env, RxV, RsV, RtV, 0);
119
}
111
}
120
+#endif /* CONFIG_USER_ONLY */
112
121
113
float32 HELPER(sffms_lib)(CPUHexagonState *env, float32 RxV,
122
static void hppa_cpu_realizefn(DeviceState *dev, Error **errp)
114
float32 RsV, float32 RtV)
123
{
115
{
124
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
116
- bool infinp;
125
cc->tcg_ops.tlb_fill = hppa_cpu_tlb_fill;
117
- bool infminusinf;
126
#ifndef CONFIG_USER_ONLY
118
- float32 tmp;
127
cc->get_phys_page_debug = hppa_cpu_get_phys_page_debug;
119
-
128
+ cc->tcg_ops.do_unaligned_access = hppa_cpu_do_unaligned_access;
dc->vmsd = &vmstate_hppa_cpu;
#endif
- cc->do_unaligned_access = hppa_cpu_do_unaligned_access;
cc->disas_set_info = hppa_cpu_disas_set_info;
cc->tcg_ops.initialize = hppa_translate_init;

diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = mb_cpu_class_by_name;
cc->has_work = mb_cpu_has_work;
cc->tcg_ops.do_interrupt = mb_cpu_do_interrupt;
- cc->do_unaligned_access = mb_cpu_do_unaligned_access;
cc->tcg_ops.cpu_exec_interrupt = mb_cpu_exec_interrupt;
cc->dump_state = mb_cpu_dump_state;
cc->set_pc = mb_cpu_set_pc;
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
cc->tcg_ops.tlb_fill = mb_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->tcg_ops.do_transaction_failed = mb_cpu_transaction_failed;
+ cc->tcg_ops.do_unaligned_access = mb_cpu_do_unaligned_access;
cc->get_phys_page_attrs_debug = mb_cpu_get_phys_page_attrs_debug;
dc->vmsd = &vmstate_mb_cpu;
#endif
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->gdb_read_register = mips_cpu_gdb_read_register;
cc->gdb_write_register = mips_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
- cc->do_unaligned_access = mips_cpu_do_unaligned_access;
cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_mips_cpu;
#endif
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->tcg_ops.tlb_fill = mips_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->tcg_ops.do_transaction_failed = mips_cpu_do_transaction_failed;
+ cc->tcg_ops.do_unaligned_access = mips_cpu_do_unaligned_access;
+
#endif /* CONFIG_USER_ONLY */
#endif /* CONFIG_TCG */

diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -XXX,XX +XXX,XX @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)
cc->disas_set_info = nios2_cpu_disas_set_info;
cc->tcg_ops.tlb_fill = nios2_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
- cc->do_unaligned_access = nios2_cpu_do_unaligned_access;
+ cc->tcg_ops.do_unaligned_access = nios2_cpu_do_unaligned_access;
cc->get_phys_page_debug = nios2_cpu_get_phys_page_debug;
#endif
cc->gdb_read_register = nios2_cpu_gdb_read_register;
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
cc->tcg_ops.do_transaction_failed = riscv_cpu_do_transaction_failed;
- cc->do_unaligned_access = riscv_cpu_do_unaligned_access;
+ cc->tcg_ops.do_unaligned_access = riscv_cpu_do_unaligned_access;
cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
/* For now, mark unmigratable: */
cc->vmsd = &vmstate_riscv_cpu;
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
#ifdef CONFIG_TCG
cc->tcg_ops.cpu_exec_interrupt = s390_cpu_exec_interrupt;
cc->tcg_ops.debug_excp_handler = s390x_cpu_debug_excp_handler;
- cc->do_unaligned_access = s390x_cpu_do_unaligned_access;
+ cc->tcg_ops.do_unaligned_access = s390x_cpu_do_unaligned_access;
#endif
#endif
cc->disas_set_info = s390_cpu_disas_set_info;
diff --git a/target/s390x/excp_helper.c b/target/s390x/excp_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/excp_helper.c
+++ b/target/s390x/excp_helper.c
@@ -XXX,XX +XXX,XX @@ void HELPER(monitor_call)(CPUS390XState *env, uint64_t monitor_code,
}
}

-#endif /* CONFIG_USER_ONLY */
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_write_register = superh_cpu_gdb_write_register;
cc->tcg_ops.tlb_fill = superh_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
- cc->do_unaligned_access = superh_cpu_do_unaligned_access;
+ cc->tcg_ops.do_unaligned_access = superh_cpu_do_unaligned_access;
cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
#endif
cc->disas_set_info = superh_cpu_disas_set_info;
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
cc->tcg_ops.tlb_fill = sparc_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->tcg_ops.do_transaction_failed = sparc_cpu_do_transaction_failed;
- cc->do_unaligned_access = sparc_cpu_do_unaligned_access;
+ cc->tcg_ops.do_unaligned_access = sparc_cpu_do_unaligned_access;
cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_sparc_cpu;
#endif
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_stop_before_watchpoint = true;
cc->tcg_ops.tlb_fill = xtensa_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
- cc->do_unaligned_access = xtensa_cpu_do_unaligned_access;
+ cc->tcg_ops.do_unaligned_access = xtensa_cpu_do_unaligned_access;
cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
cc->tcg_ops.do_transaction_failed = xtensa_cpu_do_transaction_failed;
#endif
diff --git a/target/ppc/translate_init.c.inc b/target/ppc/translate_init.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate_init.c.inc
+++ b/target/ppc/translate_init.c.inc
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = ppc_cpu_set_pc;
cc->gdb_read_register = ppc_cpu_gdb_read_register;
cc->gdb_write_register = ppc_cpu_gdb_write_register;
- cc->do_unaligned_access = ppc_cpu_do_unaligned_access;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_ppc_cpu;
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
#ifndef CONFIG_USER_ONLY
cc->tcg_ops.cpu_exec_enter = ppc_cpu_exec_enter;
cc->tcg_ops.cpu_exec_exit = ppc_cpu_exec_exit;
+ cc->tcg_ops.do_unaligned_access = ppc_cpu_do_unaligned_access;
#endif /* !CONFIG_USER_ONLY */
#endif /* CONFIG_TCG */

--
2.25.1

- arch_fpop_start(env);
- set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
- infminusinf = float32_is_infinity(RxV) &&
- is_inf_prod(RsV, RtV) &&
- (fGETBIT(31, RsV ^ RxV ^ RtV) == 0);
- infinp = float32_is_infinity(RxV) ||
- float32_is_infinity(RtV) ||
- float32_is_infinity(RsV);
- RxV = check_nan(RxV, RxV, &env->fp_status);
- RxV = check_nan(RxV, RsV, &env->fp_status);
- RxV = check_nan(RxV, RtV, &env->fp_status);
- float32 minus_RsV = float32_sub(float32_zero, RsV, &env->fp_status);
- tmp = internal_fmafx(minus_RsV, RtV, RxV, 0, &env->fp_status);
- if (!(float32_is_zero(RxV) && is_zero_prod(RsV, RtV))) {
- RxV = tmp;
- }
- set_float_exception_flags(0, &env->fp_status);
- if (float32_is_infinity(RxV) && !infinp) {
- RxV = RxV - 1;
- }
- if (infminusinf) {
- RxV = 0;
- }
- arch_fpop_end(env);
- return RxV;
+ return do_sffma_lib(env, RxV, RsV, RtV, float_muladd_negate_product);
}

float64 HELPER(dfmpyfix)(CPUHexagonState *env, float64 RssV, float64 RttV)
--
2.43.0
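The hunks above all perform the same mechanical move: the unaligned-access hook leaves CPUClass and becomes a member of the TCG-only operations struct. As a rough illustration only (the types below are simplified stand-ins for the real QEMU declarations, and "foo" is a hypothetical target), the before/after shape of the registration is:

```c
/* Sketch under assumptions: simplified stand-ins for QEMU's types. */
#include <stdint.h>

typedef struct CPUState CPUState;
typedef uint64_t vaddr;
typedef int MMUAccessType;

typedef struct TcgCpuOperations {
    /* TCG-only hooks live here and can be compiled out of KVM-only builds. */
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr);
} TcgCpuOperations;

typedef struct CPUClass {
    TcgCpuOperations tcg_ops;   /* was: a bare do_unaligned_access field */
} CPUClass;

static void foo_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
                                        MMUAccessType access_type,
                                        int mmu_idx, uintptr_t retaddr)
{
    /* a real target would raise its alignment exception here */
}

static void foo_cpu_class_init(CPUClass *cc)
{
    /* After the series: registered under tcg_ops, in TCG-only code. */
    cc->tcg_ops.do_unaligned_access = foo_cpu_do_unaligned_access;
}
```

The payoff of the pattern is that accelerator-specific callbacks are grouped in one place, so non-TCG builds never see (or accidentally call) them.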
From: Stefan Weil <sw@weilnetz.de>

That TCG opcode is used by debian-buster (arm64) running ffmpeg:

qemu-aarch64 /usr/bin/ffmpeg -i theora.mkv theora.webm

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reported-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Stefan Weil <sw@weilnetz.de>
Message-Id: <20210128024814.2056958-1-sw@weilnetz.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tci.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
TODO();
break;
case INDEX_op_ld16s_i32:
- TODO();
+ t0 = *tb_ptr++;
+ t1 = tci_read_r(regs, &tb_ptr);
+ t2 = tci_read_s32(&tb_ptr);
+ tci_write_reg(regs, t0, *(int16_t *)(t1 + t2));
break;
case INDEX_op_ld_i32:
t0 = *tb_ptr++;
--
2.25.1
The function is now unused.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/fma_emu.h | 2 -
target/hexagon/fma_emu.c | 171 ---------------------------------------
2 files changed, 173 deletions(-)

diff --git a/target/hexagon/fma_emu.h b/target/hexagon/fma_emu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.h
+++ b/target/hexagon/fma_emu.h
@@ -XXX,XX +XXX,XX @@ static inline uint32_t float32_getexp_raw(float32 f32)
}
int32_t float32_getexp(float32 f32);
float32 infinite_float32(uint8_t sign);
-float32 internal_fmafx(float32 a, float32 b, float32 c,
- int scale, float_status *fp_status);
float64 internal_mpyhh(float64 a, float64 b,
unsigned long long int accumulated,
float_status *fp_status);
diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)
return -1;
}

-static uint64_t float32_getmant(float32 f32)
-{
- Float a = { .i = f32 };
- if (float32_is_normal(f32)) {
- return a.mant | 1ULL << 23;
- }
- if (float32_is_zero(f32)) {
- return 0;
- }
- if (float32_is_denormal(f32)) {
- return a.mant;
- }
- return ~0ULL;
-}
-
int32_t float32_getexp(float32 f32)
{
Float a = { .i = f32 };
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
}

/* Return a maximum finite value with the requested sign */
-static float32 maxfinite_float32(uint8_t sign)
-{
- if (sign) {
- return make_float32(SF_MINUS_MAXF);
- } else {
- return make_float32(SF_MAXF);
- }
-}
-
-/* Return a zero value with requested sign */
-static float32 zero_float32(uint8_t sign)
-{
- if (sign) {
- return make_float32(0x80000000);
- } else {
- return float32_zero;
- }
-}
-
#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
{ \
@@ -XXX,XX +XXX,XX @@ static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
}

GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
-GEN_XF_ROUND(float32, SF_MANTBITS, SF_INF_EXP, Float)
-
-static bool is_inf_prod(float64 a, float64 b)
-{
- return ((float64_is_infinity(a) && float64_is_infinity(b)) ||
- (float64_is_infinity(a) && is_finite(b) && (!float64_is_zero(b))) ||
- (float64_is_infinity(b) && is_finite(a) && (!float64_is_zero(a))));
-}
-
-static float64 special_fma(float64 a, float64 b, float64 c,
- float_status *fp_status)
-{
- float64 ret = make_float64(0);
-
- /*
- * If A multiplied by B is an exact infinity and C is also an infinity
- * but with the opposite sign, FMA returns NaN and raises invalid.
- */
- uint8_t a_sign = float64_is_neg(a);
- uint8_t b_sign = float64_is_neg(b);
- uint8_t c_sign = float64_is_neg(c);
- if (is_inf_prod(a, b) && float64_is_infinity(c)) {
- if ((a_sign ^ b_sign) != c_sign) {
- ret = make_float64(DF_NAN);
- float_raise(float_flag_invalid, fp_status);
- return ret;
- }
- }
- if ((float64_is_infinity(a) && float64_is_zero(b)) ||
- (float64_is_zero(a) && float64_is_infinity(b))) {
- ret = make_float64(DF_NAN);
- float_raise(float_flag_invalid, fp_status);
- return ret;
- }
- /*
- * If none of the above checks are true and C is a NaN,
- * a NaN shall be returned
- * If A or B are NaN, a NAN shall be returned.
- */
- if (float64_is_any_nan(a) ||
- float64_is_any_nan(b) ||
- float64_is_any_nan(c)) {
- if (float64_is_any_nan(a) && (fGETBIT(51, a) == 0)) {
- float_raise(float_flag_invalid, fp_status);
- }
- if (float64_is_any_nan(b) && (fGETBIT(51, b) == 0)) {
- float_raise(float_flag_invalid, fp_status);
- }
- if (float64_is_any_nan(c) && (fGETBIT(51, c) == 0)) {
- float_raise(float_flag_invalid, fp_status);
- }
- ret = make_float64(DF_NAN);
- return ret;
- }
- /*
- * We have checked for adding opposite-signed infinities.
- * Other infinities return infinity with the correct sign
- */
- if (float64_is_infinity(c)) {
- ret = infinite_float64(c_sign);
- return ret;
- }
- if (float64_is_infinity(a) || float64_is_infinity(b)) {
- ret = infinite_float64(a_sign ^ b_sign);
- return ret;
- }
- g_assert_not_reached();
-}
-
-static float32 special_fmaf(float32 a, float32 b, float32 c,
- float_status *fp_status)
-{
- float64 aa, bb, cc;
- aa = float32_to_float64(a, fp_status);
- bb = float32_to_float64(b, fp_status);
- cc = float32_to_float64(c, fp_status);
- return float64_to_float32(special_fma(aa, bb, cc, fp_status), fp_status);
-}
-
-float32 internal_fmafx(float32 a, float32 b, float32 c, int scale,
- float_status *fp_status)
-{
- Accum prod;
- Accum acc;
- Accum result;
- accum_init(&prod);
- accum_init(&acc);
- accum_init(&result);
-
- uint8_t a_sign = float32_is_neg(a);
- uint8_t b_sign = float32_is_neg(b);
- uint8_t c_sign = float32_is_neg(c);
- if (float32_is_infinity(a) ||
- float32_is_infinity(b) ||
- float32_is_infinity(c)) {
- return special_fmaf(a, b, c, fp_status);
- }
- if (float32_is_any_nan(a) ||
- float32_is_any_nan(b) ||
- float32_is_any_nan(c)) {
- return special_fmaf(a, b, c, fp_status);
- }
- if ((scale == 0) && (float32_is_zero(a) || float32_is_zero(b))) {
- float32 tmp = float32_mul(a, b, fp_status);
- tmp = float32_add(tmp, c, fp_status);
- return tmp;
- }
-
- /* (a * 2**b) * (c * 2**d) == a*c * 2**(b+d) */
- prod.mant = int128_mul_6464(float32_getmant(a), float32_getmant(b));
-
- /*
- * Note: extracting the mantissa into an int is multiplying by
- * 2**23, so adjust here
- */
- prod.exp = float32_getexp(a) + float32_getexp(b) - SF_BIAS - 23;
- prod.sign = a_sign ^ b_sign;
- if (float32_is_zero(a) || float32_is_zero(b)) {
- prod.exp = -2 * WAY_BIG_EXP;
- }
- if ((scale > 0) && float32_is_denormal(c)) {
- acc.mant = int128_mul_6464(0, 0);
- acc.exp = -WAY_BIG_EXP;
- acc.sign = c_sign;
- acc.sticky = 1;
- result = accum_add(prod, acc);
- } else if (!float32_is_zero(c)) {
- acc.mant = int128_mul_6464(float32_getmant(c), 1);
- acc.exp = float32_getexp(c);
- acc.sign = c_sign;
- result = accum_add(prod, acc);
- } else {
- result = prod;
- }
- result.exp += scale;
- return accum_round_float32(result, fp_status);
-}

float64 internal_mpyhh(float64 a, float64 b,
unsigned long long int accumulated,
--
2.43.0
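For context on the ld16s fix above: the interpreter reads a destination register index, a base register and a signed 32-bit offset from the opcode stream, then performs a sign-extending 16-bit load. A standalone sketch of that load idiom, under the assumption of a simple byte buffer rather than TCI's real operand encoding:

```c
/* Minimal sketch of a sign-extending 16-bit load, as an interpreter
 * would implement INDEX_op_ld16s; the buffer layout here is a
 * hypothetical stand-in for the guest address computed from t1 + t2. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int64_t load16s(const uint8_t *base, int32_t offset)
{
    int16_t raw;
    /* memcpy avoids unaligned-access UB; the int16_t type does the
     * sign extension when widened to the register type. */
    memcpy(&raw, base + offset, sizeof(raw));
    return (int64_t)raw;
}

int main(void)
{
    uint8_t mem[4] = { 0x00, 0x00, 0xff, 0xff };  /* -1 stored at offset 2 */
    printf("%lld\n", (long long)load16s(mem, 2)); /* prints -1 */
    return 0;
}
```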
From: Eduardo Habkost <ehabkost@redhat.com>

Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
[claudio: wrapped target code in CONFIG_TCG]
Signed-off-by: Claudio Fontana <cfontana@suse.de>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210204163931.7358-6-cfontana@suse.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/hw/core/cpu.h | 12 ++++++------
accel/tcg/cpu-exec.c | 12 ++++++------
target/alpha/cpu.c | 2 +-
target/arm/cpu.c | 2 +-
target/arm/cpu64.c | 5 ++++-
target/arm/cpu_tcg.c | 7 ++++++-
target/avr/cpu.c | 2 +-
target/cris/cpu.c | 2 +-
target/hppa/cpu.c | 2 +-
target/i386/tcg/tcg-cpu.c | 6 +++---
target/lm32/cpu.c | 2 +-
target/m68k/cpu.c | 2 +-
target/microblaze/cpu.c | 2 +-
target/mips/cpu.c | 2 +-
target/nios2/cpu.c | 2 +-
target/openrisc/cpu.c | 2 +-
target/riscv/cpu.c | 2 +-
target/rx/cpu.c | 2 +-
target/s390x/cpu.c | 2 +-
target/sh4/cpu.c | 2 +-
target/sparc/cpu.c | 2 +-
target/tilegx/cpu.c | 2 +-
target/unicore32/cpu.c | 2 +-
target/xtensa/cpu.c | 2 +-
target/ppc/translate_init.c.inc | 16 ++++++++++------
25 files changed, 54 insertions(+), 42 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
*/
void (*synchronize_from_tb)(CPUState *cpu,
const struct TranslationBlock *tb);
+ /** @cpu_exec_enter: Callback for cpu_exec preparation */
+ void (*cpu_exec_enter)(CPUState *cpu);
+ /** @cpu_exec_exit: Callback for cpu_exec cleanup */
+ void (*cpu_exec_exit)(CPUState *cpu);
+ /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
+ bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);

} TcgCpuOperations;

@@ -XXX,XX +XXX,XX @@ typedef struct TcgCpuOperations {
* @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the
* gdb stub. Returns a pointer to the XML contents for the specified XML file
* or NULL if the CPU doesn't have a dynamically generated content for it.
- * @cpu_exec_enter: Callback for cpu_exec preparation.
- * @cpu_exec_exit: Callback for cpu_exec cleanup.
- * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
* @disas_set_info: Setup architecture specific components of disassembly info
* @adjust_watchpoint_address: Perform a target-specific adjustment to an
* address before attempting to match it against watchpoints.
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
const char *gdb_core_xml_file;
gchar * (*gdb_arch_name)(CPUState *cpu);
const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
- void (*cpu_exec_enter)(CPUState *cpu);
- void (*cpu_exec_exit)(CPUState *cpu);
- bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);

void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ static void cpu_exec_enter(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);

- if (cc->cpu_exec_enter) {
- cc->cpu_exec_enter(cpu);
+ if (cc->tcg_ops.cpu_exec_enter) {
+ cc->tcg_ops.cpu_exec_enter(cpu);
}
}

@@ -XXX,XX +XXX,XX @@ static void cpu_exec_exit(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);

- if (cc->cpu_exec_exit) {
- cc->cpu_exec_exit(cpu);
+ if (cc->tcg_ops.cpu_exec_exit) {
+ cc->tcg_ops.cpu_exec_exit(cpu);
}
}

@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
True when it is, and we should restart on a new TB,
and via longjmp via cpu_loop_exit. */
else {
- if (cc->cpu_exec_interrupt &&
- cc->cpu_exec_interrupt(cpu, interrupt_request)) {
+ if (cc->tcg_ops.cpu_exec_interrupt &&
+ cc->tcg_ops.cpu_exec_interrupt(cpu, interrupt_request)) {
if (need_replay_interrupt(interrupt_request)) {
replay_interrupt();
}
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = alpha_cpu_class_by_name;
cc->has_work = alpha_cpu_has_work;
cc->do_interrupt = alpha_cpu_do_interrupt;
- cc->cpu_exec_interrupt = alpha_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = alpha_cpu_exec_interrupt;
cc->dump_state = alpha_cpu_dump_state;
cc->set_pc = alpha_cpu_set_pc;
cc->gdb_read_register = alpha_cpu_gdb_read_register;
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = arm_cpu_class_by_name;
cc->has_work = arm_cpu_has_work;
- cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
cc->dump_state = arm_cpu_dump_state;
cc->set_pc = arm_cpu_set_pc;
cc->gdb_read_register = arm_cpu_gdb_read_register;
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->disas_set_info = arm_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_ops.initialize = arm_translate_init;
+ cc->tcg_ops.cpu_exec_interrupt = arm_cpu_exec_interrupt;
cc->tcg_ops.synchronize_from_tb = arm_cpu_synchronize_from_tb;
cc->tlb_fill = arm_cpu_tlb_fill;
cc->debug_excp_handler = arm_debug_excp_handler;
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
{
CPUClass *cc = CPU_CLASS(oc);

- cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
+#ifdef CONFIG_TCG
+ cc->tcg_ops.cpu_exec_interrupt = arm_cpu_exec_interrupt;
+#endif /* CONFIG_TCG */
+
cc->gdb_read_register = aarch64_cpu_gdb_read_register;
cc->gdb_write_register = aarch64_cpu_gdb_write_register;
cc->gdb_num_core_regs = 34;
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu_tcg.c
+++ b/target/arm/cpu_tcg.c
@@ -XXX,XX +XXX,XX @@
/* CPU models. These are not needed for the AArch64 linux-user build. */
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)

+#ifdef CONFIG_TCG
static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
CPUClass *cc = CPU_GET_CLASS(cs);
@@ -XXX,XX +XXX,XX @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
return ret;
}
+#endif /* CONFIG_TCG */

static void arm926_initfn(Object *obj)
{
@@ -XXX,XX +XXX,XX @@ static void arm_v7m_class_init(ObjectClass *oc, void *data)
cc->do_interrupt = arm_v7m_cpu_do_interrupt;
#endif

- cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
+#ifdef CONFIG_TCG
+ cc->tcg_ops.cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
+#endif /* CONFIG_TCG */
+
cc->gdb_core_xml_file = "arm-m-profile.xml";
}

diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)

cc->has_work = avr_cpu_has_work;
cc->do_interrupt = avr_cpu_do_interrupt;
- cc->cpu_exec_interrupt = avr_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = avr_cpu_exec_interrupt;
cc->dump_state = avr_cpu_dump_state;
cc->set_pc = avr_cpu_set_pc;
cc->memory_rw_debug = avr_cpu_memory_rw_debug;
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/cpu.c
+++ b/target/cris/cpu.c
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = cris_cpu_class_by_name;
cc->has_work = cris_cpu_has_work;
cc->do_interrupt = cris_cpu_do_interrupt;
- cc->cpu_exec_interrupt = cris_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = cris_cpu_exec_interrupt;
cc->dump_state = cris_cpu_dump_state;
cc->set_pc = cris_cpu_set_pc;
cc->gdb_read_register = cris_cpu_gdb_read_register;
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = hppa_cpu_class_by_name;
cc->has_work = hppa_cpu_has_work;
cc->do_interrupt = hppa_cpu_do_interrupt;
- cc->cpu_exec_interrupt = hppa_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = hppa_cpu_exec_interrupt;
cc->dump_state = hppa_cpu_dump_state;
cc->set_pc = hppa_cpu_set_pc;
cc->tcg_ops.synchronize_from_tb = hppa_cpu_synchronize_from_tb;
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_synchronize_from_tb(CPUState *cs,
void tcg_cpu_common_class_init(CPUClass *cc)
{
cc->do_interrupt = x86_cpu_do_interrupt;
- cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = x86_cpu_exec_interrupt;
cc->tcg_ops.synchronize_from_tb = x86_cpu_synchronize_from_tb;
- cc->cpu_exec_enter = x86_cpu_exec_enter;
- cc->cpu_exec_exit = x86_cpu_exec_exit;
+ cc->tcg_ops.cpu_exec_enter = x86_cpu_exec_enter;
+ cc->tcg_ops.cpu_exec_exit = x86_cpu_exec_exit;
cc->tcg_ops.initialize = tcg_x86_init;
cc->tlb_fill = x86_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
diff --git a/target/lm32/cpu.c b/target/lm32/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/lm32/cpu.c
+++ b/target/lm32/cpu.c
@@ -XXX,XX +XXX,XX @@ static void lm32_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = lm32_cpu_class_by_name;
cc->has_work = lm32_cpu_has_work;
cc->do_interrupt = lm32_cpu_do_interrupt;
- cc->cpu_exec_interrupt = lm32_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = lm32_cpu_exec_interrupt;
cc->dump_state = lm32_cpu_dump_state;
cc->set_pc = lm32_cpu_set_pc;
cc->gdb_read_register = lm32_cpu_gdb_read_register;
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
cc->class_by_name = m68k_cpu_class_by_name;
cc->has_work = m68k_cpu_has_work;
cc->do_interrupt = m68k_cpu_do_interrupt;
- cc->cpu_exec_interrupt = m68k_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = m68k_cpu_exec_interrupt;
cc->dump_state = m68k_cpu_dump_state;
cc->set_pc = m68k_cpu_set_pc;
cc->gdb_read_register = m68k_cpu_gdb_read_register;
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
cc->has_work = mb_cpu_has_work;
cc->do_interrupt = mb_cpu_do_interrupt;
cc->do_unaligned_access = mb_cpu_do_unaligned_access;
- cc->cpu_exec_interrupt = mb_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = mb_cpu_exec_interrupt;
cc->dump_state = mb_cpu_dump_state;
cc->set_pc = mb_cpu_set_pc;
cc->tcg_ops.synchronize_from_tb = mb_cpu_synchronize_from_tb;
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->class_by_name = mips_cpu_class_by_name;
cc->has_work = mips_cpu_has_work;
cc->do_interrupt = mips_cpu_do_interrupt;
- cc->cpu_exec_interrupt = mips_cpu_exec_interrupt;
cc->dump_state = mips_cpu_dump_state;
cc->set_pc = mips_cpu_set_pc;
cc->gdb_read_register = mips_cpu_gdb_read_register;
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->disas_set_info = mips_cpu_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_ops.initialize = mips_tcg_init;
+ cc->tcg_ops.cpu_exec_interrupt = mips_cpu_exec_interrupt;
cc->tcg_ops.synchronize_from_tb = mips_cpu_synchronize_from_tb;
cc->tlb_fill = mips_cpu_tlb_fill;
#endif
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -XXX,XX +XXX,XX @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = nios2_cpu_class_by_name;
cc->has_work = nios2_cpu_has_work;
cc->do_interrupt = nios2_cpu_do_interrupt;
- cc->cpu_exec_interrupt = nios2_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = nios2_cpu_exec_interrupt;
cc->dump_state = nios2_cpu_dump_state;
cc->set_pc = nios2_cpu_set_pc;
cc->disas_set_info = nios2_cpu_disas_set_info;
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = openrisc_cpu_class_by_name;
cc->has_work = openrisc_cpu_has_work;
cc->do_interrupt = openrisc_cpu_do_interrupt;
- cc->cpu_exec_interrupt = openrisc_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = openrisc_cpu_exec_interrupt;
cc->dump_state = openrisc_cpu_dump_state;
cc->set_pc = openrisc_cpu_set_pc;
cc->gdb_read_register = openrisc_cpu_gdb_read_register;
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
cc->class_by_name = riscv_cpu_class_by_name;
cc->has_work = riscv_cpu_has_work;
cc->do_interrupt = riscv_cpu_do_interrupt;
- cc->cpu_exec_interrupt = riscv_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = riscv_cpu_exec_interrupt;
cc->dump_state = riscv_cpu_dump_state;
cc->set_pc = riscv_cpu_set_pc;
cc->tcg_ops.synchronize_from_tb = riscv_cpu_synchronize_from_tb;
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)
cc->class_by_name = rx_cpu_class_by_name;
cc->has_work = rx_cpu_has_work;
cc->do_interrupt = rx_cpu_do_interrupt;
- cc->cpu_exec_interrupt = rx_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = rx_cpu_exec_interrupt;
cc->dump_state = rx_cpu_dump_state;
cc->set_pc = rx_cpu_set_pc;
cc->tcg_ops.synchronize_from_tb = rx_cpu_synchronize_from_tb;
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
cc->get_crash_info = s390_cpu_get_crash_info;
cc->write_elf64_note = s390_cpu_write_elf64_note;
#ifdef CONFIG_TCG
- cc->cpu_exec_interrupt = s390_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = s390_cpu_exec_interrupt;
cc->debug_excp_handler = s390x_cpu_debug_excp_handler;
cc->do_unaligned_access = s390x_cpu_do_unaligned_access;
#endif
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = superh_cpu_class_by_name;
cc->has_work = superh_cpu_has_work;
cc->do_interrupt = superh_cpu_do_interrupt;
- cc->cpu_exec_interrupt = superh_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = superh_cpu_exec_interrupt;
cc->dump_state = superh_cpu_dump_state;
cc->set_pc = superh_cpu_set_pc;
cc->tcg_ops.synchronize_from_tb = superh_cpu_synchronize_from_tb;
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
cc->parse_features = sparc_cpu_parse_features;
cc->has_work = sparc_cpu_has_work;
cc->do_interrupt = sparc_cpu_do_interrupt;
- cc->cpu_exec_interrupt = sparc_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = sparc_cpu_exec_interrupt;
cc->dump_state = sparc_cpu_dump_state;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
cc->memory_rw_debug = sparc_cpu_memory_rw_debug;
diff --git a/target/tilegx/cpu.c b/target/tilegx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tilegx/cpu.c
+++ b/target/tilegx/cpu.c
@@ -XXX,XX +XXX,XX @@ static void tilegx_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = tilegx_cpu_class_by_name;
cc->has_work = tilegx_cpu_has_work;
cc->do_interrupt = tilegx_cpu_do_interrupt;
- cc->cpu_exec_interrupt = tilegx_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = tilegx_cpu_exec_interrupt;
cc->dump_state = tilegx_cpu_dump_state;
cc->set_pc = tilegx_cpu_set_pc;
cc->tlb_fill = tilegx_cpu_tlb_fill;
diff --git a/target/unicore32/cpu.c b/target/unicore32/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/unicore32/cpu.c
+++ b/target/unicore32/cpu.c
@@ -XXX,XX +XXX,XX @@ static void uc32_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = uc32_cpu_class_by_name;
cc->has_work = uc32_cpu_has_work;
cc->do_interrupt = uc32_cpu_do_interrupt;
- cc->cpu_exec_interrupt = uc32_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = uc32_cpu_exec_interrupt;
cc->dump_state = uc32_cpu_dump_state;
cc->set_pc = uc32_cpu_set_pc;
cc->tlb_fill = uc32_cpu_tlb_fill;
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = xtensa_cpu_class_by_name;
cc->has_work = xtensa_cpu_has_work;
cc->do_interrupt = xtensa_cpu_do_interrupt;
- cc->cpu_exec_interrupt = xtensa_cpu_exec_interrupt;
+ cc->tcg_ops.cpu_exec_interrupt = xtensa_cpu_exec_interrupt;
cc->dump_state = xtensa_cpu_dump_state;
cc->set_pc = xtensa_cpu_set_pc;
cc->gdb_read_register = xtensa_cpu_gdb_read_register;
diff --git a/target/ppc/translate_init.c.inc b/target/ppc/translate_init.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate_init.c.inc
+++ b/target/ppc/translate_init.c.inc
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_reset(DeviceState *dev)
}

#ifndef CONFIG_USER_ONLY
+
static bool ppc_cpu_is_big_endian(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -XXX,XX +XXX,XX @@ static bool ppc_cpu_is_big_endian(CPUState *cs)
return !msr_le;
}

+#ifdef CONFIG_TCG
static void ppc_cpu_exec_enter(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_exec_exit(CPUState *cs)
vhc->cpu_exec_exit(cpu->vhyp, cpu);
}
}
-#endif
+#endif /* CONFIG_TCG */
+
+#endif /* !CONFIG_USER_ONLY */

static void ppc_cpu_instance_init(Object *obj)
{
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = ppc_cpu_class_by_name;
cc->has_work = ppc_cpu_has_work;
cc->do_interrupt = ppc_cpu_do_interrupt;
- cc->cpu_exec_interrupt = ppc_cpu_exec_interrupt;
cc->dump_state = ppc_cpu_dump_state;
cc->dump_statistics = ppc_cpu_dump_statistics;
cc->set_pc = ppc_cpu_set_pc;
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
#endif
#ifdef CONFIG_TCG
cc->tcg_ops.initialize = ppc_translate_init;
+ cc->tcg_ops.cpu_exec_interrupt = ppc_cpu_exec_interrupt;
cc->tlb_fill = ppc_cpu_tlb_fill;
-#endif
#ifndef CONFIG_USER_ONLY
- cc->cpu_exec_enter = ppc_cpu_exec_enter;
- cc->cpu_exec_exit = ppc_cpu_exec_exit;
-#endif
+ cc->tcg_ops.cpu_exec_enter = ppc_cpu_exec_enter;
+ cc->tcg_ops.cpu_exec_exit = ppc_cpu_exec_exit;
+#endif /* !CONFIG_USER_ONLY */
+#endif /* CONFIG_TCG */

cc->disas_set_info = ppc_disas_set_info;

--
2.25.1

This massive macro is now only used once.
Expand it for use only by float64.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/fma_emu.c | 255 +++++++++++++++++++--------------------
1 file changed, 127 insertions(+), 128 deletions(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
}

/* Return a maximum finite value with the requested sign */
-#define GEN_XF_ROUND(SUFFIX, MANTBITS, INF_EXP, INTERNAL_TYPE) \
-static SUFFIX accum_round_##SUFFIX(Accum a, float_status * fp_status) \
-{ \
- if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0) \
- && ((a.guard | a.round | a.sticky) == 0)) { \
- /* result zero */ \
- switch (fp_status->float_rounding_mode) { \
- case float_round_down: \
- return zero_##SUFFIX(1); \
- default: \
- return zero_##SUFFIX(0); \
- } \
- } \
- /* Normalize right */ \
- /* We want MANTBITS bits of mantissa plus the leading one. */ \
- /* That means that we want MANTBITS+1 bits, or 0x000000000000FF_FFFF */ \
- /* So we need to normalize right while the high word is non-zero and \
- * while the low word is nonzero when masked with 0xffe0_0000_0000_0000 */ \
- while ((int128_gethi(a.mant) != 0) || \
- ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0)) { \
- a = accum_norm_right(a, 1); \
- } \
- /* \
- * OK, now normalize left \
- * We want to normalize left until we have a leading one in bit 24 \
- * Theoretically, we only need to shift a maximum of one to the left if we \
- * shifted out lots of bits from B, or if we had no shift / 1 shift sticky \
- * should be 0 \
- */ \
- while ((int128_getlo(a.mant) & (1ULL << MANTBITS)) == 0) { \
- a = accum_norm_left(a); \
- } \
- /* \
- * OK, now we might need to denormalize because of potential underflow. \
- * We need to do this before rounding, and rounding might make us normal \
- * again \
- */ \
- while (a.exp <= 0) { \
- a = accum_norm_right(a, 1 - a.exp); \
- /* \
- * Do we have underflow? \
- * That's when we get an inexact answer because we ran out of bits \
- * in a denormal. \
- */ \
- if (a.guard || a.round || a.sticky) { \
- float_raise(float_flag_underflow, fp_status); \
- } \
- } \
- /* OK, we're relatively canonical... now we need to round */ \
- if (a.guard || a.round || a.sticky) { \
- float_raise(float_flag_inexact, fp_status); \
- switch (fp_status->float_rounding_mode) { \
- case float_round_to_zero: \
- /* Chop and we're done */ \
- break; \
- case float_round_up: \
- if (a.sign == 0) { \
- a.mant = int128_add(a.mant, int128_one()); \
- } \
- break; \
- case float_round_down: \
- if (a.sign != 0) { \
- a.mant = int128_add(a.mant, int128_one()); \
- } \
- break; \
- default: \
- if (a.round || a.sticky) { \
- /* round up if guard is 1, down if guard is zero */ \
- a.mant = int128_add(a.mant, int128_make64(a.guard)); \
- } else if (a.guard) { \
- /* exactly .5, round up if odd */ \
- a.mant = int128_add(a.mant, int128_and(a.mant, int128_one())); \
- } \
- break; \
- } \
- } \
- /* \
- * OK, now we might have carried all the way up. \
- * So we might need to shr once \
- * at least we know that the lsb should be zero if we rounded and \
- * got a carry out... \
- */ \
- if ((int128_getlo(a.mant) >> (MANTBITS + 1)) != 0) { \
- a = accum_norm_right(a, 1); \
- } \
- /* Overflow? */ \
- if (a.exp >= INF_EXP) { \
- /* Yep, inf result */ \
- float_raise(float_flag_overflow, fp_status); \
- float_raise(float_flag_inexact, fp_status); \
- switch (fp_status->float_rounding_mode) { \
- case float_round_to_zero: \
- return maxfinite_##SUFFIX(a.sign); \
- case float_round_up: \
- if (a.sign == 0) { \
- return infinite_##SUFFIX(a.sign); \
- } else { \
- return maxfinite_##SUFFIX(a.sign); \
- } \
- case float_round_down: \
- if (a.sign != 0) { \
- return infinite_##SUFFIX(a.sign); \
- } else { \
- return maxfinite_##SUFFIX(a.sign); \
- } \
- default: \
- return infinite_##SUFFIX(a.sign); \
- } \
- } \
- /* Underflow? */ \
- if (int128_getlo(a.mant) & (1ULL << MANTBITS)) { \
- /* Leading one means: No, we're normal. So, we should be done... */ \
- INTERNAL_TYPE ret; \
- ret.i = 0; \
- ret.sign = a.sign; \
- ret.exp = a.exp; \
- ret.mant = int128_getlo(a.mant); \
- return ret.i; \
- } \
- assert(a.exp == 1); \
- INTERNAL_TYPE ret; \
- ret.i = 0; \
- ret.sign = a.sign; \
- ret.exp = 0; \
- ret.mant = int128_getlo(a.mant); \
- return ret.i; \
+static float64 accum_round_float64(Accum a, float_status *fp_status)
+{
+ if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
+ && ((a.guard | a.round | a.sticky) == 0)) {
+ /* result zero */
+ switch (fp_status->float_rounding_mode) {
+ case float_round_down:
+ return zero_float64(1);
+ default:
+ return zero_float64(0);
+ }
+ }
+ /*
+ * Normalize right
+ * We want DF_MANTBITS bits of mantissa plus the leading one.
+ * That means that we want DF_MANTBITS+1 bits, or 0x000000000000FF_FFFF
+ * So we need to normalize right while the high word is non-zero and
+ * while the low word is nonzero when masked with 0xffe0_0000_0000_0000
+ */
+ while ((int128_gethi(a.mant) != 0) ||
+ ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0)) {
+ a = accum_norm_right(a, 1);
+ }
+ /*
+ * OK, now normalize left
+ * We want to normalize left until we have a leading one in bit 24
+ * Theoretically, we only need to shift a maximum of one to the left if we
+ * shifted out lots of bits from B, or if we had no shift / 1 shift sticky
+ * should be 0
+ */
+ while ((int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) == 0) {
+ a = accum_norm_left(a);
+ }
+ /*
+ * OK, now we might need to denormalize because of potential underflow.
+ * We need to do this before rounding, and rounding might make us normal
+ * again
+ */
+ while (a.exp <= 0) {
+ a = accum_norm_right(a, 1 - a.exp);
+ /*
+ * Do we have underflow?
+ * That's when we get an inexact answer because we ran out of bits
+ * in a denormal.
+ */
+ if (a.guard || a.round || a.sticky) {
+ float_raise(float_flag_underflow, fp_status);
+ }
+ }
+ /* OK, we're relatively canonical... now we need to round */
+ if (a.guard || a.round || a.sticky) {
+ float_raise(float_flag_inexact, fp_status);
+ switch (fp_status->float_rounding_mode) {
+ case float_round_to_zero:
+ /* Chop and we're done */
+ break;
+ case float_round_up:
+ if (a.sign == 0) {
+ a.mant = int128_add(a.mant, int128_one());
+ }
+ break;
+ case float_round_down:
+ if (a.sign != 0) {
+ a.mant = int128_add(a.mant, int128_one());
+ }
+ break;
+ default:
+ if (a.round || a.sticky) {
+ /* round up if guard is 1, down if guard is zero */
+ a.mant = int128_add(a.mant, int128_make64(a.guard));
+ } else if (a.guard) {
+ /* exactly .5, round up if odd */
+ a.mant = int128_add(a.mant, int128_and(a.mant, int128_one()));
+ }
+ break;
+ }
+ }
+ /*
+ * OK, now we might have carried all the way up.
+ * So we might need to shr once
+ * at least we know that the lsb should be zero if we rounded and
+ * got a carry out...
+ */
+ if ((int128_getlo(a.mant) >> (DF_MANTBITS + 1)) != 0) {
+ a = accum_norm_right(a, 1);
+ }
+ /* Overflow? */
+ if (a.exp >= DF_INF_EXP) {
+ /* Yep, inf result */
+ float_raise(float_flag_overflow, fp_status);
+ float_raise(float_flag_inexact, fp_status);
+ switch (fp_status->float_rounding_mode) {
+ case float_round_to_zero:
+ return maxfinite_float64(a.sign);
+ case float_round_up:
+ if (a.sign == 0) {
+ return infinite_float64(a.sign);
+ } else {
+ return maxfinite_float64(a.sign);
+ }
+ case float_round_down:
+ if (a.sign != 0) {
+ return infinite_float64(a.sign);
+ } else {
+ return maxfinite_float64(a.sign);
+ }
+ default:
+ return infinite_float64(a.sign);
+ }
+ }
+ /* Underflow? */
+ if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
+ /* Leading one means: No, we're normal. So, we should be done... */
+ Double ret;
+ ret.i = 0;
+ ret.sign = a.sign;
+ ret.exp = a.exp;
+ ret.mant = int128_getlo(a.mant);
+ return ret.i;
+ }
+ assert(a.exp == 1);
+ Double ret;
+ ret.i = 0;
+ ret.sign = a.sign;
+ ret.exp = 0;
+ ret.mant = int128_getlo(a.mant);
+ return ret.i;
}

-GEN_XF_ROUND(float64, DF_MANTBITS, DF_INF_EXP, Double)
-
float64 internal_mpyhh(float64 a, float64 b,
unsigned long long int accumulated,
float_status *fp_status)
--
2.43.0
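The cpu_exec_enter/exit/interrupt moves in the series above all rely on the same dispatch idiom: the core checks the optional callback for NULL before invoking it, so a target only registers the hooks it actually implements. A hedged sketch of that idiom (simplified stand-in types, not the exact QEMU declarations):

```c
/* Sketch under assumptions: stand-in types for the QEMU originals. */
#include <stdbool.h>
#include <stddef.h>

typedef struct CPUState CPUState;

typedef struct TcgCpuOperations {
    void (*cpu_exec_enter)(CPUState *cpu);                      /* optional */
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
} TcgCpuOperations;

typedef struct CPUClass {
    TcgCpuOperations tcg_ops;
} CPUClass;

/* The NULL check makes the hook optional: the core skips targets
 * that never assigned it in their class_init. */
static void cpu_exec_enter(CPUClass *cc, CPUState *cpu)
{
    if (cc->tcg_ops.cpu_exec_enter) {
        cc->tcg_ops.cpu_exec_enter(cpu);
    }
}
```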
Each thread must have its own pc, even under TCI.

Remove the GETPC ifdef, because GETPC is always available for
helpers, and thus is always required. Move the assignment
under INDEX_op_call, because the value is only visible when
we make a call to a helper function.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210204014509.882821-6-richard.henderson@linaro.org>
---
include/exec/exec-all.h | 2 +-
tcg/tcg-common.c | 4 ----
tcg/tci.c | 7 +++----
3 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
-extern uintptr_t tci_tb_ptr;
+extern __thread uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
diff --git a/tcg/tcg-common.c b/tcg/tcg-common.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-common.c
+++ b/tcg/tcg-common.c
@@ -XXX,XX +XXX,XX @@
#include "qemu/osdep.h"
#include "tcg/tcg.h"

-#if defined(CONFIG_TCG_INTERPRETER)
-uintptr_t tci_tb_ptr;
-#endif
-
TCGOpDef tcg_op_defs[] = {
#define DEF(s, oargs, iargs, cargs, flags) \
{ #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
tcg_target_ulong, tcg_target_ulong);
#endif

+__thread uintptr_t tci_tb_ptr;
+
static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index)
{
tci_assert(index < TCG_TARGET_NB_REGS);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
#endif
TCGMemOpIdx oi;

-#if defined(GETPC)
- tci_tb_ptr = (uintptr_t)tb_ptr;
-#endif
-
/* Skip opcode and size entry. */
tb_ptr += 2;

switch (opc) {
case INDEX_op_call:
t0 = tci_read_ri(regs, &tb_ptr);
+ tci_tb_ptr = (uintptr_t)tb_ptr;
#if TCG_TARGET_REG_BITS == 32
tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
tci_read_reg(regs, TCG_REG_R1),
--
2.25.1

This structure, with bitfields, is incorrect for big-endian.
Use the existing float32_getexp_raw which uses extract32.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/fma_emu.c | 16 +++-------------
1 file changed, 3 insertions(+), 13 deletions(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ typedef union {
};
} Double;

-typedef union {
- float f;
- uint32_t i;
- struct {
- uint32_t mant:23;
- uint32_t exp:8;
- uint32_t sign:1;
- };
-} Float;
-
static uint64_t float64_getmant(float64 f64)
{
Double a = { .i = f64 };
@@ -XXX,XX +XXX,XX @@ int32_t float64_getexp(float64 f64)

int32_t float32_getexp(float32 f32)
{
- Float a = { .i = f32 };
+ int exp = float32_getexp_raw(f32);
if (float32_is_normal(f32)) {
- return a.exp;
+ return exp;
}
if (float32_is_denormal(f32)) {
- return a.exp + 1;
+ return exp + 1;
}
return -1;
}
--
2.43.0
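The big-endian bug the fma_emu patch above fixes comes from C bitfield layout being implementation-defined: a struct of mant/exp/sign fields matches the IEEE bit positions only on some hosts, whereas shifting and masking is layout-independent. A hedged sketch, with extract32 reimplemented locally for illustration (QEMU's own version lives in its bitops headers and has the same contract):

```c
#include <assert.h>
#include <stdint.h>

/* Same contract as QEMU's extract32: take 'length' bits of 'value'
 * starting at bit position 'start'.  Pure shifts and masks, so the
 * result is identical on big- and little-endian hosts. */
static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

int main(void)
{
    uint32_t f = 0x40490fdbu;           /* IEEE-754 single for pi */
    assert(extract32(f, 23, 8) == 128); /* biased exponent */
    assert(extract32(f, 31, 1) == 0);   /* sign bit */
    return 0;
}
```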
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

"exec/cpu-defs.h" contains generic CPU definitions for the
TCG frontends (mostly related to TLB). TCG backend definitions
aren't relevant here.

See tcg/README description:

4) Backend

tcg-target.h contains the target specific definitions. tcg-target.c.inc
contains the target specific code; it is #included by tcg/tcg.c, rather
than being a standalone C file.

So far only "tcg/tcg.h" requires these headers.

Remove the "tcg-target.h" header dependency on TCG frontends, so we
don't have to rebuild all frontends when hacking a single backend.

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20210204191423.1754158-1-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-defs.h | 3 ---
1 file changed, 3 deletions(-)

diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -XXX,XX +XXX,XX @@

#include "qemu/host-utils.h"
#include "qemu/thread.h"
-#ifdef CONFIG_TCG
-#include "tcg-target.h"
-#endif
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
--
2.25.1

This structure, with bitfields, is incorrect for big-endian.
Use extract64 and deposit64 instead.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/fma_emu.c | 46 ++++++++++++++--------------------------
1 file changed, 16 insertions(+), 30 deletions(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@

#define WAY_BIG_EXP 4096

-typedef union {
- double f;
- uint64_t i;
- struct {
- uint64_t mant:52;
- uint64_t exp:11;
- uint64_t sign:1;
- };
-} Double;
-
static uint64_t float64_getmant(float64 f64)
{
- Double a = { .i = f64 };
+ uint64_t mant = extract64(f64, 0, 52);
if (float64_is_normal(f64)) {
- return a.mant | 1ULL << 52;
+ return mant | 1ULL << 52;
}
if (float64_is_zero(f64)) {
return 0;
}
if (float64_is_denormal(f64)) {
- return a.mant;
+ return mant;
}
return ~0ULL;
}

int32_t float64_getexp(float64 f64)
{
- Double a = { .i = f64 };
+ int exp = extract64(f64, 52, 11);
if (float64_is_normal(f64)) {
- return a.exp;
+ return exp;
}
if (float64_is_denormal(f64)) {
- return a.exp + 1;
+ return exp + 1;
}
return -1;
}
@@ -XXX,XX +XXX,XX @@ float32 infinite_float32(uint8_t sign)
/* Return a maximum finite value with the requested sign */
static float64 accum_round_float64(Accum a, float_status *fp_status)
{
+ uint64_t ret;
+
if ((int128_gethi(a.mant) == 0) && (int128_getlo(a.mant) == 0)
&& ((a.guard | a.round | a.sticky) == 0)) {
/* result zero */
@@ -XXX,XX +XXX,XX @@ static float64 accum_round_float64(Accum a, float_status *fp_status)
}
}
/* Underflow? */
- if (int128_getlo(a.mant) & (1ULL << DF_MANTBITS)) {
+ ret = int128_getlo(a.mant);
+ if (ret & (1ULL << DF_MANTBITS)) {
/* Leading one means: No, we're normal. So, we should be done... */
- Double ret;
- ret.i = 0;
- ret.sign = a.sign;
- ret.exp = a.exp;
- ret.mant = int128_getlo(a.mant);
- return ret.i;
+ ret = deposit64(ret, 52, 11, a.exp);
+ } else {
+ assert(a.exp == 1);
+ ret = deposit64(ret, 52, 11, 0);
}
- assert(a.exp == 1);
- Double ret;
- ret.i = 0;
- ret.sign = a.sign;
- ret.exp = 0;
- ret.mant = int128_getlo(a.mant);
- return ret.i;
+ ret = deposit64(ret, 63, 1, a.sign);
+ return ret;
}

float64 internal_mpyhh(float64 a, float64 b,
--
2.43.0
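The deposit64 calls above are the write-side counterpart of extract64: they replace the old bitfield stores when packing exponent and sign back into a float64. A hedged sketch of the pair, reimplemented locally for illustration with the same contracts as the QEMU helpers:

```c
#include <assert.h>
#include <stdint.h>

static uint64_t extract64(uint64_t value, int start, int length)
{
    return (value >> start) & (~0ull >> (64 - length));
}

/* Overwrite 'length' bits of 'value' at position 'start' with fieldval. */
static uint64_t deposit64(uint64_t value, int start, int length,
                          uint64_t fieldval)
{
    uint64_t mask = (~0ull >> (64 - length)) << start;
    return (value & ~mask) | ((fieldval << start) & mask);
}

int main(void)
{
    /* Pack a double's fields: mantissa 0, biased exponent 1023, sign 1. */
    uint64_t bits = 0;
    bits = deposit64(bits, 52, 11, 1023);
    bits = deposit64(bits, 63, 1, 1);
    assert(bits == 0xbff0000000000000ull);    /* encoding of -1.0 */
    assert(extract64(bits, 52, 11) == 1023);  /* round-trips cleanly */
    return 0;
}
```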
No need to open-code 64x64->128-bit multiplication.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/hexagon/fma_emu.c | 32 +++-----------------------------
1 file changed, 3 insertions(+), 29 deletions(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ int32_t float32_getexp(float32 f32)
return -1;
}

-static uint32_t int128_getw0(Int128 x)
-{
- return int128_getlo(x);
-}
-
-static uint32_t int128_getw1(Int128 x)
-{
- return int128_getlo(x) >> 32;
-}
-
static Int128 int128_mul_6464(uint64_t ai, uint64_t bi)
{
- Int128 a, b;
- uint64_t pp0, pp1a, pp1b, pp1s, pp2;
+ uint64_t l, h;

- a = int128_make64(ai);
- b = int128_make64(bi);
- pp0 = (uint64_t)int128_getw0(a) * (uint64_t)int128_getw0(b);
- pp1a = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw0(b);
- pp1b = (uint64_t)int128_getw1(b) * (uint64_t)int128_getw0(a);
- pp2 = (uint64_t)int128_getw1(a) * (uint64_t)int128_getw1(b);
-
- pp1s = pp1a + pp1b;
- if ((pp1s < pp1a) || (pp1s < pp1b)) {
- pp2 += (1ULL << 32);
- }
- uint64_t ret_low = pp0 + (pp1s << 32);
- if ((ret_low < pp0) || (ret_low < (pp1s << 32))) {
- pp2 += 1;
- }
-
- return int128_make128(ret_low, pp2 + (pp1s >> 32));
+ mulu64(&l, &h, ai, bi);
+ return int128_make128(l, h);
}

static Int128 int128_sub_borrow(Int128 a, Int128 b, int borrow)
--
2.43.0

For a 64-bit TCI, the upper bits of a 32-bit operation are
undefined (much like a native ppc64 32-bit operation). It
simplifies everything if we don't force-extend the result.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tci.c | 66 +++++++++++++++++++++++++------------------------------
1 file changed, 30 insertions(+), 36 deletions(-)

diff --git a/tcg/tci.c b/tcg/tci.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -XXX,XX +XXX,XX @@ tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
regs[index] = value;
}

-static void
-tci_write_reg32(tcg_target_ulong *regs, TCGReg index, uint32_t value)
-{
- tci_write_reg(regs, index, value);
-}
-
#if TCG_TARGET_REG_BITS == 32
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
uint32_t low_index, uint64_t value)
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
t1 = tci_read_r32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
condition = *tb_ptr++;
- tci_write_reg32(regs, t0, tci_compare32(t1, t2, condition));
+ tci_write_reg(regs, t0, tci_compare32(t1, t2, condition));
break;
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_setcond2_i32:
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tmp64 = tci_read_r64(regs, &tb_ptr);
v64 = tci_read_ri64(regs, &tb_ptr);
condition = *tb_ptr++;
- tci_write_reg32(regs, t0, tci_compare64(tmp64, v64, condition));
+ tci_write_reg(regs, t0, tci_compare64(tmp64, v64, condition));
break;
#elif TCG_TARGET_REG_BITS == 64
case INDEX_op_setcond_i64:
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
case INDEX_op_mov_i32:
t0 = *tb_ptr++;
t1 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1);
+ tci_write_reg(regs, t0, t1);
break;
case INDEX_op_tci_movi_i32:
t0 = *tb_ptr++;
t1 = tci_read_i32(&tb_ptr);
- tci_write_reg32(regs, t0, t1);
+ tci_write_reg(regs, t0, t1);
break;

/* Load/store operations (32 bit). */
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
t0 = *tb_ptr++;
t1 = tci_read_r(regs, &tb_ptr);
t2 = tci_read_s32(&tb_ptr);
- tci_write_reg32(regs, t0, *(uint32_t *)(t1 + t2));
+ tci_write_reg(regs, t0, *(uint32_t *)(t1 + t2));
break;
case INDEX_op_st8_i32:
t0 = tci_read_r8(regs, &tb_ptr);
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 + t2);
+ tci_write_reg(regs, t0, t1 + t2);
break;
case INDEX_op_sub_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 - t2);
+ tci_write_reg(regs, t0, t1 - t2);
break;
case INDEX_op_mul_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 * t2);
+ tci_write_reg(regs, t0, t1 * t2);
break;
#if TCG_TARGET_HAS_div_i32
case INDEX_op_div_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, (int32_t)t1 / (int32_t)t2);
+ tci_write_reg(regs, t0, (int32_t)t1 / (int32_t)t2);
break;
case INDEX_op_divu_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 / t2);
+ tci_write_reg(regs, t0, t1 / t2);
break;
case INDEX_op_rem_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, (int32_t)t1 % (int32_t)t2);
+ tci_write_reg(regs, t0, (int32_t)t1 % (int32_t)t2);
break;
case INDEX_op_remu_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 % t2);
+ tci_write_reg(regs, t0, t1 % t2);
break;
#elif TCG_TARGET_HAS_div2_i32
case INDEX_op_div2_i32:
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 & t2);
+ tci_write_reg(regs, t0, t1 & t2);
break;
case INDEX_op_or_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 | t2);
+ tci_write_reg(regs, t0, t1 | t2);
break;
case INDEX_op_xor_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 ^ t2);
+ tci_write_reg(regs, t0, t1 ^ t2);
break;

/* Shift/rotate operations (32 bit). */
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 << (t2 & 31));
+ tci_write_reg(regs, t0, t1 << (t2 & 31));
break;
case INDEX_op_shr_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 >> (t2 & 31));
+ tci_write_reg(regs, t0, t1 >> (t2 & 31));
break;
case INDEX_op_sar_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, ((int32_t)t1 >> (t2 & 31)));
+ tci_write_reg(regs, t0, ((int32_t)t1 >> (t2 & 31)));
break;
#if TCG_TARGET_HAS_rot_i32
case INDEX_op_rotl_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, rol32(t1, t2 & 31));
+ tci_write_reg(regs, t0, rol32(t1, t2 & 31));
break;
case INDEX_op_rotr_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, ror32(t1, t2 & 31));
+ tci_write_reg(regs, t0, ror32(t1, t2 & 31));
break;
#endif
#if TCG_TARGET_HAS_deposit_i32
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tmp16 = *tb_ptr++;
tmp8 = *tb_ptr++;
tmp32 = (((1 << tmp8) - 1) << tmp16);
- tci_write_reg32(regs, t0, (t1 & ~tmp32) | ((t2 << tmp16) & tmp32));
+ tci_write_reg(regs, t0, (t1 & ~tmp32) | ((t2 << tmp16) & tmp32));
break;
#endif
case INDEX_op_brcond_i32:
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
case INDEX_op_ext8s_i32:
t0 = *tb_ptr++;
t1 = tci_read_r8s(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1);
+ tci_write_reg(regs, t0, t1);
break;
#endif
#if TCG_TARGET_HAS_ext16s_i32
case INDEX_op_ext16s_i32:
t0 = *tb_ptr++;
t1 = tci_read_r16s(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1);
+ tci_write_reg(regs, t0, t1);
break;
#endif
209
#if TCG_TARGET_HAS_ext8u_i32
210
case INDEX_op_ext8u_i32:
211
t0 = *tb_ptr++;
212
t1 = tci_read_r8(regs, &tb_ptr);
213
- tci_write_reg32(regs, t0, t1);
214
+ tci_write_reg(regs, t0, t1);
215
break;
216
#endif
217
#if TCG_TARGET_HAS_ext16u_i32
218
case INDEX_op_ext16u_i32:
219
t0 = *tb_ptr++;
220
t1 = tci_read_r16(regs, &tb_ptr);
221
- tci_write_reg32(regs, t0, t1);
222
+ tci_write_reg(regs, t0, t1);
223
break;
224
#endif
225
#if TCG_TARGET_HAS_bswap16_i32
226
case INDEX_op_bswap16_i32:
227
t0 = *tb_ptr++;
228
t1 = tci_read_r16(regs, &tb_ptr);
229
- tci_write_reg32(regs, t0, bswap16(t1));
230
+ tci_write_reg(regs, t0, bswap16(t1));
231
break;
232
#endif
233
#if TCG_TARGET_HAS_bswap32_i32
234
case INDEX_op_bswap32_i32:
235
t0 = *tb_ptr++;
236
t1 = tci_read_r32(regs, &tb_ptr);
237
- tci_write_reg32(regs, t0, bswap32(t1));
238
+ tci_write_reg(regs, t0, bswap32(t1));
239
break;
240
#endif
241
#if TCG_TARGET_HAS_not_i32
242
case INDEX_op_not_i32:
243
t0 = *tb_ptr++;
244
t1 = tci_read_r32(regs, &tb_ptr);
245
- tci_write_reg32(regs, t0, ~t1);
246
+ tci_write_reg(regs, t0, ~t1);
247
break;
248
#endif
249
#if TCG_TARGET_HAS_neg_i32
250
case INDEX_op_neg_i32:
251
t0 = *tb_ptr++;
252
t1 = tci_read_r32(regs, &tb_ptr);
253
- tci_write_reg32(regs, t0, -t1);
254
+ tci_write_reg(regs, t0, -t1);
255
break;
256
#endif
257
#if TCG_TARGET_REG_BITS == 64
258
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
259
t0 = *tb_ptr++;
260
t1 = tci_read_r(regs, &tb_ptr);
261
t2 = tci_read_s32(&tb_ptr);
262
- tci_write_reg32(regs, t0, *(uint32_t *)(t1 + t2));
263
+ tci_write_reg(regs, t0, *(uint32_t *)(t1 + t2));
264
break;
265
case INDEX_op_ld32s_i64:
266
t0 = *tb_ptr++;
267
--
55
--
268
2.25.1
56
2.43.0
269
270
diff view generated by jsdifflib
1
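As a quick, self-contained sanity check of that equivalence — the removed open-coded partial-product sum versus a single widening multiply — the following standalone C sketch re-derives the old algorithm and compares it against a compiler-provided 128-bit product. mul64_by_parts() and the test values are illustrative only; unsigned __int128 is a GCC/Clang extension standing in here for QEMU's mulu64():

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same decomposition the removed code used: split each operand into
     * 32-bit halves and accumulate the four partial products, handling
     * the carries by hand. */
    static void mul64_by_parts(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
    {
        uint64_t pp0  = (a & 0xffffffffu) * (b & 0xffffffffu);
        uint64_t pp1a = (a >> 32) * (b & 0xffffffffu);
        uint64_t pp1b = (b >> 32) * (a & 0xffffffffu);
        uint64_t pp2  = (a >> 32) * (b >> 32);

        uint64_t pp1s = pp1a + pp1b;
        if (pp1s < pp1a) {            /* carry out of the middle sum */
            pp2 += 1ULL << 32;
        }
        *lo = pp0 + (pp1s << 32);
        if (*lo < pp0) {              /* carry out of the low word */
            pp2 += 1;
        }
        *hi = pp2 + (pp1s >> 32);
    }

    int main(void)
    {
        uint64_t a = 0xdeadbeefcafebabeULL, b = 0x123456789abcdef0ULL;
        uint64_t lo, hi;

        mul64_by_parts(a, b, &lo, &hi);

        /* GCC/Clang extension, standing in for QEMU's mulu64(). */
        unsigned __int128 ref = (unsigned __int128)a * b;
        assert(lo == (uint64_t)ref && hi == (uint64_t)(ref >> 64));

        printf("ok: %016" PRIx64 "%016" PRIx64 "\n", hi, lo);
        return 0;
    }

The patch simply delegates this bookkeeping to the existing helper, which hosts with a native 64x64->128 multiply implement in one instruction.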
Initialize x with accumulated via direct assignment,
rather than multiplying by 1.

Reviewed-by: Brian Cain <brian.cain@oss.qualcomm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/hexagon/fma_emu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/hexagon/fma_emu.c b/target/hexagon/fma_emu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/fma_emu.c
+++ b/target/hexagon/fma_emu.c
@@ -XXX,XX +XXX,XX @@ float64 internal_mpyhh(float64 a, float64 b,
         float64_is_infinity(b)) {
         return float64_mul(a, b, fp_status);
     }
-    x.mant = int128_mul_6464(accumulated, 1);
+    x.mant = int128_make64(accumulated);
     x.sticky = sticky;
     prod = fGETUWORD(1, float64_getmant(a)) * fGETUWORD(1, float64_getmant(b));
     x.mant = int128_add(x.mant, int128_mul_6464(prod, 0x100000000ULL));
--
2.43.0

From: Philippe Mathieu-Daudé <f4bug@amsat.org>

The code is currently comparing c2 to the type promotion of
uint32_t and int32_t. That is, the conversion rules are as follows:

  (common_type) c2 == (common_type) (uint32_t)
                          (is_unsigned
                           ? (uint32_t)c2
                           : (uint32_t)(int32_t)c2)

In the signed case we lose the desired sign extension because
of the argument promotion rules of the ternary operator.

Solve the problem by doing the round-trip parsing through the
intermediate type and back to the desired common type (all in
one expression).

Fixes: a534bb15f30 ("tcg/s390: Use constant pool for cmpi")
Tested-by: Richard W.M. Jones <rjones@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reported-by: Miroslav Rezanina <mrezanin@redhat.com>
Reported-by: Richard W.M. Jones <rjones@redhat.com>
Suggested-by: David Hildenbrand <david@redhat.com>
Suggested-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20210204182902.1742826-1-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/s390/tcg-target.c.inc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390/tcg-target.c.inc
+++ b/tcg/s390/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
         op = (is_unsigned ? RIL_CLFI : RIL_CFI);
         tcg_out_insn_RIL(s, op, r1, c2);
         goto exit;
-    } else if (c2 == (is_unsigned ? (uint32_t)c2 : (int32_t)c2)) {
+    } else if (c2 == (is_unsigned ? (TCGArg)(uint32_t)c2 : (TCGArg)(int32_t)c2)) {
         op = (is_unsigned ? RIL_CLGFI : RIL_CGFI);
         tcg_out_insn_RIL(s, op, r1, c2);
         goto exit;
--
2.25.1
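To see concretely what the tcg/s390 fix above is about, here is a minimal standalone C illustration, assuming a 64-bit unsigned TCGArg; the typedef and values below are stand-ins for illustration, not QEMU code:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for QEMU's TCGArg: an unsigned, host-register-sized integer. */
    typedef uint64_t TCGArg;

    int main(void)
    {
        TCGArg c2 = (TCGArg)-1;   /* a constant that is -1 when sign-extended */
        int is_unsigned = 0;

        /* Broken: the ternary's common type is uint32_t, so the comparison
         * against the 64-bit c2 zero-extends and -1 compares unequal. */
        int broken = (c2 == (is_unsigned ? (uint32_t)c2 : (int32_t)c2));

        /* Fixed: round-trip through the intermediate type and back to
         * TCGArg, so the sign extension survives into the comparison. */
        int fixed = (c2 == (is_unsigned ? (TCGArg)(uint32_t)c2
                                        : (TCGArg)(int32_t)c2));

        printf("broken=%d fixed=%d\n", broken, fixed);   /* broken=0 fixed=1 */
        return 0;
    }

With the broken form, a negative constant that does fit in a sign-extended 32-bit immediate is misclassified, so the backend emits the wrong compare variant; the round-trip cast restores the intended test.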
From: Claudio Fontana <cfontana@suse.de>

We cannot in principle make the TCG Operations field definitions
conditional on CONFIG_TCG in code that is included by both common_ss
and specific_ss modules.

Therefore, what we can do safely to restrict the TCG fields to TCG-only
builds is to move all TCG cpu operations into a separate header file,
which is only included by TCG, target-specific code.

This leaves just a NULL pointer in cpu.h for the non-TCG builds.

This also tidies up the code in all targets a bit, having all TCG cpu
operations neatly contained by a dedicated data struct.

Signed-off-by: Claudio Fontana <cfontana@suse.de>
Message-Id: <20210204163931.7358-16-cfontana@suse.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h           | 103 ++------------------------------
 include/hw/core/tcg-cpu-ops.h   |  97 ++++++++++++++++++++++++++++++
 target/arm/internals.h          |   6 ++
 accel/tcg/cpu-exec.c            |  27 +++++----
 accel/tcg/cputlb.c              |  35 +++++++++--
 accel/tcg/user-exec.c           |   9 +--
 hw/mips/jazz.c                  |   7 ++-
 softmmu/physmem.c               |  13 ++--
 target/alpha/cpu.c              |  21 +++++--
 target/arm/cpu.c                |  41 ++++++-----
 target/arm/cpu64.c              |   7 +--
 target/arm/cpu_tcg.c            |  28 +++++++--
 target/avr/cpu.c                |  19 ++++--
 target/avr/helper.c             |   5 +-
 target/cris/cpu.c               |  43 ++++++-----
 target/cris/helper.c            |   5 +-
 target/hppa/cpu.c               |  22 ++++---
 target/i386/tcg/tcg-cpu.c       |  26 ++++----
 target/lm32/cpu.c               |  19 ++++--
 target/m68k/cpu.c               |  19 ++++--
 target/microblaze/cpu.c         |  25 +++++---
 target/mips/cpu.c               |  36 +++++++----
 target/moxie/cpu.c              |  15 ++++-
 target/nios2/cpu.c              |  18 ++++--
 target/openrisc/cpu.c           |  17 ++++--
 target/riscv/cpu.c              |  23 ++++---
 target/rx/cpu.c                 |  20 +++++--
 target/s390x/cpu.c              |  33 ++++++----
 target/sh4/cpu.c                |  21 +++++--
 target/sparc/cpu.c              |  25 +++++---
 target/tilegx/cpu.c             |  17 ++++--
 target/tricore/cpu.c            |  12 +++-
 target/unicore32/cpu.c          |  17 ++++--
 target/xtensa/cpu.c             |  23 ++++---
 target/ppc/translate_init.c.inc |  33 ++++----
 MAINTAINERS                     |   1 +
 36 files changed, 582 insertions(+), 306 deletions(-)
 create mode 100644 include/hw/core/tcg-cpu-ops.h

Convert all targets simultaneously, as the gen_intermediate_code
function disappears from the target. While there are possible
workarounds, they're larger than simply performing the conversion.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/translator.h        | 14 --------------
 include/hw/core/tcg-cpu-ops.h    | 13 +++++++++++++
 target/alpha/cpu.h               |  2 ++
 target/arm/internals.h           |  2 ++
 target/avr/cpu.h                 |  2 ++
 target/hexagon/cpu.h             |  2 ++
 target/hppa/cpu.h                |  2 ++
 target/i386/tcg/helper-tcg.h     |  2 ++
 target/loongarch/internals.h     |  2 ++
 target/m68k/cpu.h                |  2 ++
 target/microblaze/cpu.h          |  2 ++
 target/mips/tcg/tcg-internal.h   |  2 ++
 target/openrisc/cpu.h            |  2 ++
 target/ppc/cpu.h                 |  2 ++
 target/riscv/cpu.h               |  3 +++
 target/rx/cpu.h                  |  2 ++
 target/s390x/s390x-internal.h    |  2 ++
 target/sh4/cpu.h                 |  2 ++
 target/sparc/cpu.h               |  2 ++
 target/tricore/cpu.h             |  2 ++
 target/xtensa/cpu.h              |  2 ++
 accel/tcg/cpu-exec.c             |  8 +++++---
 accel/tcg/translate-all.c        |  8 +++++---
 target/alpha/cpu.c               |  1 +
 target/alpha/translate.c         |  4 ++--
 target/arm/cpu.c                 |  1 +
 target/arm/tcg/cpu-v7m.c         |  1 +
 target/arm/tcg/translate.c       |  5 ++---
 target/avr/cpu.c                 |  1 +
 target/avr/translate.c           |  6 +++---
 target/hexagon/cpu.c             |  1 +
 target/hexagon/translate.c       |  4 ++--
 target/hppa/cpu.c                |  1 +
 target/hppa/translate.c          |  4 ++--
 target/i386/tcg/tcg-cpu.c        |  1 +
 target/i386/tcg/translate.c      |  5 ++---
 target/loongarch/cpu.c           |  1 +
 target/loongarch/tcg/translate.c |  4 ++--
 target/m68k/cpu.c                |  1 +
 target/m68k/translate.c          |  4 ++--
 target/microblaze/cpu.c          |  1 +
 target/microblaze/translate.c    |  4 ++--
 target/mips/cpu.c                |  1 +
 target/mips/tcg/translate.c      |  4 ++--
 target/openrisc/cpu.c            |  1 +
 target/openrisc/translate.c      |  4 ++--
 target/ppc/cpu_init.c            |  1 +
 target/ppc/translate.c           |  4 ++--
 target/riscv/tcg/tcg-cpu.c       |  1 +
 target/riscv/translate.c         |  4 ++--
 target/rx/cpu.c                  |  1 +
 target/rx/translate.c            |  4 ++--
 target/s390x/cpu.c               |  1 +
 target/s390x/tcg/translate.c     |  4 ++--
 target/sh4/cpu.c                 |  1 +
 target/sh4/translate.c           |  4 ++--
 target/sparc/cpu.c               |  1 +
 target/sparc/translate.c         |  4 ++--
 target/tricore/cpu.c             |  1 +
 target/tricore/translate.c       |  5 ++---
 target/xtensa/cpu.c              |  1 +
 target/xtensa/translate.c        |  4 ++--
 62 files changed, 121 insertions(+), 62 deletions(-)
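Both of the series above revolve around the same underlying shape: optional accelerator hooks gathered into one struct, reached through a pointer that is simply NULL when the accelerator is compiled out. A minimal, purely illustrative C sketch of that pattern — none of these names are QEMU APIs:

    #include <stdio.h>

    /* Hypothetical miniature of the TCGCPUOps idea: the per-class hook
     * table lives in its own struct, and the class keeps only a pointer. */
    typedef struct AccelOps {
        void (*initialize)(void);
        void (*translate_code)(const char *what);   /* stand-in signature */
    } AccelOps;

    typedef struct CPUClassLike {
        const char *name;
        const AccelOps *accel_ops;   /* NULL when the accel is not built in */
    } CPUClassLike;

    static void demo_init(void) { puts("init"); }
    static void demo_translate(const char *what) { printf("translate %s\n", what); }

    static const AccelOps demo_ops = {
        .initialize = demo_init,
        .translate_code = demo_translate,
    };

    int main(void)
    {
        CPUClassLike cc = { .name = "demo", .accel_ops = &demo_ops };

        /* Callers guard on the pointer instead of on #ifdef soup. */
        if (cc.accel_ops && cc.accel_ops->initialize) {
            cc.accel_ops->initialize();
        }
        if (cc.accel_ops && cc.accel_ops->translate_code) {
            cc.accel_ops->translate_code("a block");
        }
        return 0;
    }

The 2021 series introduces the struct and the NULL-when-absent pointer; the 2024 series then grows the table with a mandatory translate_code hook, replacing the old free-standing gen_intermediate_code() symbol.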
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ typedef struct CPUWatchpoint CPUWatchpoint;

 struct TranslationBlock;

-/**
- * struct TcgCpuOperations: TCG operations specific to a CPU class
- */
-typedef struct TcgCpuOperations {
-    /**
-     * @initialize: Initalize TCG state
-     *
-     * Called when the first CPU is realized.
-     */
-    void (*initialize)(void);
-    /**
-     * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
-     *
-     * This is called when we abandon execution of a TB before starting it,
-     * and must set all parts of the CPU state which the previous TB in the
-     * chain may not have updated.
-     * By default, when this is NULL, a call is made to @set_pc(tb->pc).
-     *
-     * If more state needs to be restored, the target must implement a
-     * function to restore all the state, and register it here.
-     */
-    void (*synchronize_from_tb)(CPUState *cpu,
-                                const struct TranslationBlock *tb);
-    /** @cpu_exec_enter: Callback for cpu_exec preparation */
-    void (*cpu_exec_enter)(CPUState *cpu);
-    /** @cpu_exec_exit: Callback for cpu_exec cleanup */
-    void (*cpu_exec_exit)(CPUState *cpu);
-    /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
-    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
-    /** @do_interrupt: Callback for interrupt handling. */
-    void (*do_interrupt)(CPUState *cpu);
-    /**
-     * @tlb_fill: Handle a softmmu tlb miss or user-only address fault
-     *
-     * For system mode, if the access is valid, call tlb_set_page
-     * and return true; if the access is invalid, and probe is
-     * true, return false; otherwise raise an exception and do
-     * not return. For user-only mode, always raise an exception
-     * and do not return.
-     */
-    bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
-                     MMUAccessType access_type, int mmu_idx,
-                     bool probe, uintptr_t retaddr);
-    /** @debug_excp_handler: Callback for handling debug exceptions */
-    void (*debug_excp_handler)(CPUState *cpu);
-
-    /**
-     * @do_transaction_failed: Callback for handling failed memory transactions
-     * (ie bus faults or external aborts; not MMU faults)
-     */
-    void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
-                                  unsigned size, MMUAccessType access_type,
-                                  int mmu_idx, MemTxAttrs attrs,
-                                  MemTxResult response, uintptr_t retaddr);
-    /**
-     * @do_unaligned_access: Callback for unaligned access handling
-     */
-    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
-                                MMUAccessType access_type,
-                                int mmu_idx, uintptr_t retaddr);
-    /**
-     * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM
-     */
-    vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
-
-    /**
-     * @debug_check_watchpoint: return true if the architectural
-     * watchpoint whose address has matched should really fire, used by ARM
-     */
-    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
-
-} TcgCpuOperations;
+/* see tcg-cpu-ops.h */
+struct TCGCPUOps;

 /**
  * CPUClass:
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
     int gdb_num_core_regs;
     bool gdb_stop_before_watchpoint;

-    TcgCpuOperations tcg_ops;
+    /* when TCG is not available, this pointer is NULL */
+    struct TCGCPUOps *tcg_ops;
 };

 /*
@@ -XXX,XX +XXX,XX @@ CPUState *cpu_by_arch_id(int64_t id);

 void cpu_interrupt(CPUState *cpu, int mask);

-static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
-                                        MMUAccessType access_type,
-                                        int mmu_idx, uintptr_t retaddr)
-{
-    CPUClass *cc = CPU_GET_CLASS(cpu);
-
-    cc->tcg_ops.do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
-}
-
-static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
-                                          vaddr addr, unsigned size,
-                                          MMUAccessType access_type,
-                                          int mmu_idx, MemTxAttrs attrs,
-                                          MemTxResult response,
-                                          uintptr_t retaddr)
-{
-    CPUClass *cc = CPU_GET_CLASS(cpu);
-
-    if (!cpu->ignore_memory_transaction_failures &&
-        cc->tcg_ops.do_transaction_failed) {
-        cc->tcg_ops.do_transaction_failed(cpu, physaddr, addr, size,
-                                          access_type, mmu_idx, attrs,
-                                          response, retaddr);
-    }
-}
-
 /**
  * cpu_set_pc:
  * @cpu: The CPU to set the program counter for.
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * TCG CPU-specific operations
+ *
+ * Copyright 2021 SUSE LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef TCG_CPU_OPS_H
+#define TCG_CPU_OPS_H
+
+#include "hw/core/cpu.h"
+
+struct TCGCPUOps {
+    /**
+     * @initialize: Initalize TCG state
+     *
+     * Called when the first CPU is realized.
+     */
+    void (*initialize)(void);
+    /**
+     * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
+     *
+     * This is called when we abandon execution of a TB before starting it,
+     * and must set all parts of the CPU state which the previous TB in the
+     * chain may not have updated.
+     * By default, when this is NULL, a call is made to @set_pc(tb->pc).
+     *
+     * If more state needs to be restored, the target must implement a
+     * function to restore all the state, and register it here.
+     */
+    void (*synchronize_from_tb)(CPUState *cpu,
+                                const struct TranslationBlock *tb);
+    /** @cpu_exec_enter: Callback for cpu_exec preparation */
+    void (*cpu_exec_enter)(CPUState *cpu);
+    /** @cpu_exec_exit: Callback for cpu_exec cleanup */
+    void (*cpu_exec_exit)(CPUState *cpu);
+    /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
+    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
+    /**
+     * @do_interrupt: Callback for interrupt handling.
+     *
+     * note that this is in general SOFTMMU only, but it actually isn't
+     * because of an x86 hack (accel/tcg/cpu-exec.c), so we cannot put it
+     * in the SOFTMMU section in general.
+     */
+    void (*do_interrupt)(CPUState *cpu);
+    /**
+     * @tlb_fill: Handle a softmmu tlb miss or user-only address fault
+     *
+     * For system mode, if the access is valid, call tlb_set_page
+     * and return true; if the access is invalid, and probe is
+     * true, return false; otherwise raise an exception and do
+     * not return. For user-only mode, always raise an exception
+     * and do not return.
+     */
+    bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
+                     MMUAccessType access_type, int mmu_idx,
+                     bool probe, uintptr_t retaddr);
+    /** @debug_excp_handler: Callback for handling debug exceptions */
+    void (*debug_excp_handler)(CPUState *cpu);
+
+#ifdef NEED_CPU_H
+#ifdef CONFIG_SOFTMMU
+    /**
+     * @do_transaction_failed: Callback for handling failed memory transactions
+     * (ie bus faults or external aborts; not MMU faults)
+     */
+    void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
+                                  unsigned size, MMUAccessType access_type,
+                                  int mmu_idx, MemTxAttrs attrs,
+                                  MemTxResult response, uintptr_t retaddr);
+    /**
+     * @do_unaligned_access: Callback for unaligned access handling
+     */
+    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
+                                MMUAccessType access_type,
+                                int mmu_idx, uintptr_t retaddr);
+
+    /**
+     * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM
+     */
+    vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
+
+    /**
+     * @debug_check_watchpoint: return true if the architectural
+     * watchpoint whose address has matched should really fire, used by ARM
+     */
+    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
+
+#endif /* CONFIG_SOFTMMU */
+#endif /* NEED_CPU_H */
+
+};
+
+#endif /* TCG_CPU_OPS_H */
diff --git a/include/exec/translator.h b/include/exec/translator.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/translator.h
+++ b/include/exec/translator.h
@@ -XXX,XX +XXX,XX @@
 #include "qemu/bswap.h"
 #include "exec/vaddr.h"

-/**
- * gen_intermediate_code
- * @cpu: cpu context
- * @tb: translation block
- * @max_insns: max number of instructions to translate
- * @pc: guest virtual program counter address
- * @host_pc: host physical program counter address
- *
- * This function must be provided by the target, which should create
- * the target-specific DisasContext, and then invoke translator_loop.
- */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc);
-
 /**
  * DisasJumpType:
  * @DISAS_NEXT: Next instruction in program order.
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/tcg-cpu-ops.h
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps {
      * Called when the first CPU is realized.
      */
     void (*initialize)(void);
+    /**
+     * @translate_code: Translate guest instructions to TCGOps
+     * @cpu: cpu context
+     * @tb: translation block
+     * @max_insns: max number of instructions to translate
+     * @pc: guest virtual program counter address
+     * @host_pc: host physical program counter address
+     *
+     * This function must be provided by the target, which should create
+     * the target-specific DisasContext, and then invoke translator_loop.
+     */
+    void (*translate_code)(CPUState *cpu, TranslationBlock *tb,
+                           int *max_insns, vaddr pc, void *host_pc);
     /**
      * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
      *
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.h
+++ b/target/alpha/cpu.h
@@ -XXX,XX +XXX,XX @@ enum {
 };

 void alpha_translate_init(void);
+void alpha_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc);

 #define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU
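The @tlb_fill contract quoted in the header above — install the translation and return true; return false only for a failed probe; otherwise raise an exception and never return — can be summarized in a small standalone sketch. All names below are stand-ins for illustration, not QEMU APIs:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef uint64_t vaddr_t;

    static bool fake_lookup(vaddr_t addr, vaddr_t *phys)
    {
        *phys = addr;             /* pretend identity mapping... */
        return addr != 0;         /* ...that fails for address 0 */
    }

    static void fake_tlb_set_page(vaddr_t addr, vaddr_t phys)
    {
        (void)addr; (void)phys;   /* would install the translation */
    }

    static _Noreturn void fake_raise_exception(void)
    {
        exit(1);                  /* models "raise and do not return" */
    }

    static bool demo_tlb_fill(vaddr_t address, bool probe)
    {
        vaddr_t phys;

        if (fake_lookup(address, &phys)) {
            fake_tlb_set_page(address, phys);   /* valid access: success */
            return true;
        }
        if (probe) {
            return false;                       /* invalid, caller only probed */
        }
        fake_raise_exception();                 /* invalid: never returns */
    }

    int main(void)
    {
        return demo_tlb_fill(0x1000, false) && !demo_tlb_fill(0, true) ? 0 : 1;
    }

The three-way outcome is why probe_access_internal() below can test the hook's return value for non-faulting probes while ordinary tlb_fill() callers simply assert success.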
diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ static inline int r14_bank_number(int mode)
 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
 void arm_translate_init(void);

+#ifdef CONFIG_TCG
+void arm_cpu_synchronize_from_tb(CPUState *cs,
+                                 const struct TranslationBlock *tb);
+#endif /* CONFIG_TCG */
+
+
 enum arm_fprounding {
     FPROUNDING_TIEEVEN,
     FPROUNDING_POSINF,
diff --git a/target/arm/internals.h b/target/arm/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -XXX,XX +XXX,XX @@ void init_cpreg_list(ARMCPU *cpu);

 void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
 void arm_translate_init(void);
+void arm_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc);

 void arm_cpu_register_gdb_commands(ARMCPU *cpu);
 void aarch64_cpu_register_gdb_commands(ARMCPU *cpu, GString *,
diff --git a/target/avr/cpu.h b/target/avr/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.h
+++ b/target/avr/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
 }

 void avr_cpu_tcg_init(void);
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc);

 int cpu_avr_exec(CPUState *cpu);

diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/cpu.h
+++ b/target/hexagon/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
 typedef HexagonCPU ArchCPU;

 void hexagon_translate_init(void);
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc);

 #include "exec/cpu-all.h"

diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
 }

 void hppa_translate_init(void);
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
+                         int *max_insns, vaddr pc, void *host_pc);

 #define CPU_RESOLVING_TYPE TYPE_HPPA_CPU

diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/helper-tcg.h
+++ b/target/i386/tcg/helper-tcg.h
@@ -XXX,XX +XXX,XX @@ static inline target_long lshift(target_long x, int n)

 /* translate.c */
 void tcg_x86_init(void);
+void x86_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc);

 /* excp_helper.c */
 G_NORETURN void raise_exception(CPUX86State *env, int exception_index);
diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/internals.h
+++ b/target/loongarch/internals.h
@@ -XXX,XX +XXX,XX @@
 #define TARGET_VIRT_MASK MAKE_64BIT_MASK(0, TARGET_VIRT_ADDR_SPACE_BITS)

 void loongarch_translate_init(void);
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
+                              int *max_insns, vaddr pc, void *host_pc);

 void G_NORETURN do_raise_exception(CPULoongArchState *env,
                                    uint32_t exception,
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.h
+++ b/target/m68k/cpu.h
@@ -XXX,XX +XXX,XX @@ int m68k_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

 void m68k_tcg_init(void);
+void m68k_translate_code(CPUState *cs, TranslationBlock *tb,
+                         int *max_insns, vaddr pc, void *host_pc);
 void m68k_cpu_init_gdb(M68kCPU *cpu);
 uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
 void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -XXX,XX +XXX,XX @@ static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
 }

 void mb_tcg_init(void);
+void mb_translate_code(CPUState *cs, TranslationBlock *tb,
+                       int *max_insns, vaddr pc, void *host_pc);

 #define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU

diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/tcg-internal.h
+++ b/target/mips/tcg/tcg-internal.h
@@ -XXX,XX +XXX,XX @@
 #include "cpu.h"

 void mips_tcg_init(void);
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+                         int *max_insns, vaddr pc, void *host_pc);

 void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
 G_NORETURN void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.h
+++ b/target/openrisc/cpu.h
@@ -XXX,XX +XXX,XX @@ void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
 int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void openrisc_translate_init(void);
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+                             int *max_insns, vaddr pc, void *host_pc);
 int print_insn_or1k(bfd_vma addr, disassemble_info *info);

 #ifndef CONFIG_USER_ONLY
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_ppc_cpu;

 /*****************************************************************************/
 void ppc_translate_init(void);
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc);

 #if !defined(CONFIG_USER_ONLY)
 void ppc_store_sdr1(CPUPPCState *env, target_ulong value);
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
 void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);

 void riscv_translate_init(void);
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc);
+
 G_NORETURN void riscv_raise_exception(CPURISCVState *env,
                                       uint32_t exception, uintptr_t pc);

diff --git a/target/rx/cpu.h b/target/rx/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.h
+++ b/target/rx/cpu.h
@@ -XXX,XX +XXX,XX @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

 void rx_translate_init(void);
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+                       int *max_insns, vaddr pc, void *host_pc);
 void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte);

 #include "exec/cpu-all.h"
diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/s390x-internal.h
+++ b/target/s390x/s390x-internal.h
@@ -XXX,XX +XXX,XX @@ void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3,

 /* translate.c */
 void s390x_translate_init(void);
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc);
 void s390x_restore_state_to_opc(CPUState *cs,
                                 const TranslationBlock *tb,
                                 const uint64_t *data);
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.h
+++ b/target/sh4/cpu.h
@@ -XXX,XX +XXX,XX @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
                                                uintptr_t retaddr);

 void sh4_translate_init(void);
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+                        int *max_insns, vaddr pc, void *host_pc);

 #if !defined(CONFIG_USER_ONLY)
 hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.h
+++ b/target/sparc/cpu.h
@@ -XXX,XX +XXX,XX @@ int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,

 /* translate.c */
 void sparc_tcg_init(void);
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc);

 /* fop_helper.c */
 target_ulong cpu_get_fsr(CPUSPARCState *);
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.h
+++ b/target/tricore/cpu.h
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, PRIV, 0, 2)

 void cpu_state_reset(CPUTriCoreState *s);
 void tricore_tcg_init(void);
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc);

 static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, vaddr *pc,
                                         uint64_t *cs_base, uint32_t *flags)
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.h
+++ b/target/xtensa/cpu.h
@@ -XXX,XX +XXX,XX @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,

 void xtensa_collect_sr_names(const XtensaConfig *config);
 void xtensa_translate_init(void);
+void xtensa_translate_code(CPUState *cs, TranslationBlock *tb,
+                           int *max_insns, vaddr pc, void *host_pc);
 void **xtensa_get_regfile_by_name(const char *name, int entries, int bits);
 void xtensa_breakpoint_handler(CPUState *cs);
 void xtensa_register_core(XtensaConfigList *node);
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu-common.h"
 #include "qemu/qemu-print.h"
 #include "cpu.h"
+#include "hw/core/tcg-cpu-ops.h"
 #include "trace.h"
 #include "disas/disas.h"
 #include "exec/exec-all.h"
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
                    TARGET_FMT_lx "] %s\n",
                    last_tb->tc.ptr, last_tb->pc,
                    lookup_symbol(last_tb->pc));
-        if (cc->tcg_ops.synchronize_from_tb) {
-            cc->tcg_ops.synchronize_from_tb(cpu, last_tb);
+        if (cc->tcg_ops->synchronize_from_tb) {
+            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
         } else {
             assert(cc->set_pc);
             cc->set_pc(cpu, last_tb->pc);
@@ -XXX,XX +XXX,XX @@ static void cpu_exec_enter(CPUState *cpu)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);

-    if (cc->tcg_ops.cpu_exec_enter) {
-        cc->tcg_ops.cpu_exec_enter(cpu);
+    if (cc->tcg_ops->cpu_exec_enter) {
+        cc->tcg_ops->cpu_exec_enter(cpu);
     }
 }

@@ -XXX,XX +XXX,XX @@ static void cpu_exec_exit(CPUState *cpu)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);

-    if (cc->tcg_ops.cpu_exec_exit) {
-        cc->tcg_ops.cpu_exec_exit(cpu);
+    if (cc->tcg_ops->cpu_exec_exit) {
+        cc->tcg_ops->cpu_exec_exit(cpu);
     }
 }

@@ -XXX,XX +XXX,XX @@ static inline void cpu_handle_debug_exception(CPUState *cpu)
         }
     }

-    if (cc->tcg_ops.debug_excp_handler) {
-        cc->tcg_ops.debug_excp_handler(cpu);
+    if (cc->tcg_ops->debug_excp_handler) {
+        cc->tcg_ops->debug_excp_handler(cpu);
     }
 }

@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
                loop */
 #if defined(TARGET_I386)
             CPUClass *cc = CPU_GET_CLASS(cpu);
-            cc->tcg_ops.do_interrupt(cpu);
+            cc->tcg_ops->do_interrupt(cpu);
 #endif
             *ret = cpu->exception_index;
             cpu->exception_index = -1;
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
             if (replay_exception()) {
                 CPUClass *cc = CPU_GET_CLASS(cpu);
                 qemu_mutex_lock_iothread();
-                cc->tcg_ops.do_interrupt(cpu);
+                cc->tcg_ops->do_interrupt(cpu);
                 qemu_mutex_unlock_iothread();
                 cpu->exception_index = -1;

@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
            True when it is, and we should restart on a new TB,
            and via longjmp via cpu_loop_exit. */
         else {
-            if (cc->tcg_ops.cpu_exec_interrupt &&
-                cc->tcg_ops.cpu_exec_interrupt(cpu, interrupt_request)) {
+            if (cc->tcg_ops->cpu_exec_interrupt &&
+                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                 if (need_replay_interrupt(interrupt_request)) {
                     replay_interrupt();
                 }
@@ -XXX,XX +XXX,XX @@ void tcg_exec_realizefn(CPUState *cpu, Error **errp)
     CPUClass *cc = CPU_GET_CLASS(cpu);

     if (!tcg_target_initialized) {
-        cc->tcg_ops.initialize();
+        cc->tcg_ops->initialize();
         tcg_target_initialized = true;
     }
     tlb_init(cpu);
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/osdep.h"
 #include "qemu/main-loop.h"
 #include "cpu.h"
+#include "hw/core/tcg-cpu-ops.h"
 #include "exec/exec-all.h"
 #include "exec/memory.h"
 #include "exec/address-spaces.h"
@@ -XXX,XX +XXX,XX @@ static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
      * This is not a probe, so only valid return is success; failure
      * should result in exception + longjmp to the cpu loop.
      */
-    ok = cc->tcg_ops.tlb_fill(cpu, addr, size,
-                              access_type, mmu_idx, false, retaddr);
+    ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
+                               access_type, mmu_idx, false, retaddr);
     assert(ok);
 }

+static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
+                                        MMUAccessType access_type,
+                                        int mmu_idx, uintptr_t retaddr)
+{
+    CPUClass *cc = CPU_GET_CLASS(cpu);
+
+    cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
+}
+
+static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
+                                          vaddr addr, unsigned size,
+                                          MMUAccessType access_type,
+                                          int mmu_idx, MemTxAttrs attrs,
+                                          MemTxResult response,
+                                          uintptr_t retaddr)
+{
+    CPUClass *cc = CPU_GET_CLASS(cpu);
+
+    if (!cpu->ignore_memory_transaction_failures &&
+        cc->tcg_ops->do_transaction_failed) {
+        cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
+                                           access_type, mmu_idx, attrs,
+                                           response, retaddr);
+    }
+}
+
 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                          int mmu_idx, target_ulong addr, uintptr_t retaddr,
                          MMUAccessType access_type, MemOp op)
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     CPUState *cs = env_cpu(env);
     CPUClass *cc = CPU_GET_CLASS(cs);

-    if (!cc->tcg_ops.tlb_fill(cs, addr, fault_size, access_type,
-                              mmu_idx, nonfault, retaddr)) {
+    if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
+                               mmu_idx, nonfault, retaddr)) {
         /* Non-faulting page table read failed. */
         *phost = NULL;
         return TLB_INVALID_MASK;
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -XXX,XX +XXX,XX @@
  */
 #include "qemu/osdep.h"
 #include "cpu.h"
+#include "hw/core/tcg-cpu-ops.h"
 #include "disas/disas.h"
 #include "exec/exec-all.h"
 #include "tcg/tcg.h"
@@ -XXX,XX +XXX,XX @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
     clear_helper_retaddr();

     cc = CPU_GET_CLASS(cpu);
-    cc->tcg_ops.tlb_fill(cpu, address, 0, access_type,
-                         MMU_USER_IDX, false, pc);
+    cc->tcg_ops->tlb_fill(cpu, address, 0, access_type,
+                          MMU_USER_IDX, false, pc);
     g_assert_not_reached();
 }

@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     } else {
         CPUState *cpu = env_cpu(env);
         CPUClass *cc = CPU_GET_CLASS(cpu);
-        cc->tcg_ops.tlb_fill(cpu, addr, fault_size, access_type,
-                             MMU_USER_IDX, false, ra);
+        cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
+                              MMU_USER_IDX, false, ra);
         g_assert_not_reached();
     }
 }
diff --git a/hw/mips/jazz.c b/hw/mips/jazz.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/mips/jazz.c
+++ b/hw/mips/jazz.c
@@ -XXX,XX +XXX,XX @@
 #include "qapi/error.h"
 #include "qemu/error-report.h"
 #include "qemu/help_option.h"
+#ifdef CONFIG_TCG
+#include "hw/core/tcg-cpu-ops.h"
+#endif /* CONFIG_TCG */

 enum jazz_model_e {
     JAZZ_MAGNUM,
@@ -XXX,XX +XXX,XX @@ static void mips_jazz_init(MachineState *machine,
      */
     cc = CPU_GET_CLASS(cpu);
 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
-    real_do_transaction_failed = cc->tcg_ops.do_transaction_failed;
-    cc->tcg_ops.do_transaction_failed = mips_jazz_do_transaction_failed;
+    real_do_transaction_failed = cc->tcg_ops->do_transaction_failed;
+    cc->tcg_ops->do_transaction_failed = mips_jazz_do_transaction_failed;
 #endif /* CONFIG_TCG && !CONFIG_USER_ONLY */

     /* allocate RAM */
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index XXXXXXX..XXXXXXX 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -XXX,XX +XXX,XX @@
 #include "qemu/cutils.h"
 #include "qemu/cacheflush.h"
 #include "cpu.h"
+
+#ifdef CONFIG_TCG
+#include "hw/core/tcg-cpu-ops.h"
+#endif /* CONFIG_TCG */
+
 #include "exec/exec-all.h"
 #include "exec/target_page.h"
 #include "hw/qdev-core.h"
@@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
         return;
     }

-    if (cc->tcg_ops.adjust_watchpoint_address) {
+    if (cc->tcg_ops->adjust_watchpoint_address) {
         /* this is currently used only by ARM BE32 */
-        addr = cc->tcg_ops.adjust_watchpoint_address(cpu, addr, len);
+        addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
     }
     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
         if (watchpoint_address_matches(wp, addr, len)
@@ -XXX,XX +XXX,XX @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
             wp->hitaddr = MAX(addr, wp->vaddr);
             wp->hitattrs = attrs;
             if (!cpu->watchpoint_hit) {
-                if (wp->flags & BP_CPU && cc->tcg_ops.debug_check_watchpoint &&
-                    !cc->tcg_ops.debug_check_watchpoint(cpu, wp)) {
+                if (wp->flags & BP_CPU && cc->tcg_ops->debug_check_watchpoint &&
+                    !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
                     wp->flags &= ~BP_WATCHPOINT_HIT;
                     continue;
                 }
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_initfn(Object *obj)
 #endif
 }

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps alpha_tcg_ops = {
+    .initialize = alpha_translate_init,
+    .cpu_exec_interrupt = alpha_cpu_exec_interrupt,
+    .tlb_fill = alpha_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+    .do_interrupt = alpha_cpu_do_interrupt,
+    .do_transaction_failed = alpha_cpu_do_transaction_failed,
+    .do_unaligned_access = alpha_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+
 static void alpha_cpu_class_init(ObjectClass *oc, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(oc);
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)

     cc->class_by_name = alpha_cpu_class_by_name;
     cc->has_work = alpha_cpu_has_work;
-    cc->tcg_ops.do_interrupt = alpha_cpu_do_interrupt;
-    cc->tcg_ops.cpu_exec_interrupt = alpha_cpu_exec_interrupt;
     cc->dump_state = alpha_cpu_dump_state;
     cc->set_pc = alpha_cpu_set_pc;
     cc->gdb_read_register = alpha_cpu_gdb_read_register;
     cc->gdb_write_register = alpha_cpu_gdb_write_register;
-    cc->tcg_ops.tlb_fill = alpha_cpu_tlb_fill;
 #ifndef CONFIG_USER_ONLY
-    cc->tcg_ops.do_transaction_failed = alpha_cpu_do_transaction_failed;
-    cc->tcg_ops.do_unaligned_access = alpha_cpu_do_unaligned_access;
     cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug;
     dc->vmsd = &vmstate_alpha_cpu;
 #endif
     cc->disas_set_info = alpha_cpu_disas_set_info;
-    cc->tcg_ops.initialize = alpha_translate_init;

+    cc->tcg_ops = &alpha_tcg_ops;
     cc->gdb_num_core_regs = 67;
 }

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)

     if (!tcg_target_initialized) {
         /* Check mandatory TCGCPUOps handlers */
+        const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
 #ifndef CONFIG_USER_ONLY
-        assert(cpu->cc->tcg_ops->cpu_exec_halt);
-        assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
+        assert(tcg_ops->cpu_exec_halt);
+        assert(tcg_ops->cpu_exec_interrupt);
 #endif /* !CONFIG_USER_ONLY */
-        cpu->cc->tcg_ops->initialize();
+        assert(tcg_ops->translate_code);
+        tcg_ops->initialize();
         tcg_target_initialized = true;
     }
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,

     tcg_func_start(tcg_ctx);

-    tcg_ctx->cpu = env_cpu(env);
-    gen_intermediate_code(env_cpu(env), tb, max_insns, pc, host_pc);
+    CPUState *cs = env_cpu(env);
+    tcg_ctx->cpu = cs;
+    cs->cc->tcg_ops->translate_code(cs, tb, max_insns, pc, host_pc);

     assert(tb->size != 0);
     tcg_ctx->cpu = NULL;
     *max_insns = tb->icount;
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     /*
      * Overflow of code_gen_buffer, or the current slice of it.
      *
-     * TODO: We don't need to re-do gen_intermediate_code, nor
+     * TODO: We don't need to re-do tcg_ops->translate_code, nor
      * should we re-do the tcg optimization currently hidden
      * inside tcg_gen_code. All that should be required is to
      * flush the TBs, allocate a new TB, re-initialize it per
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps alpha_sysemu_ops = {

 static const TCGCPUOps alpha_tcg_ops = {
     .initialize = alpha_translate_init,
+    .translate_code = alpha_translate_code,
     .synchronize_from_tb = alpha_cpu_synchronize_from_tb,
     .restore_state_to_opc = alpha_restore_state_to_opc,

diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps alpha_tr_ops = {
     .tb_stop = alpha_tr_tb_stop,
 };

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void alpha_translate_code(CPUState *cpu, TranslationBlock *tb,
+                          int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc;
     translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
443
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
612
index XXXXXXX..XXXXXXX 100644
444
index XXXXXXX..XXXXXXX 100644
613
--- a/target/arm/cpu.c
445
--- a/target/arm/cpu.c
614
+++ b/target/arm/cpu.c
446
+++ b/target/arm/cpu.c
615
@@ -XXX,XX +XXX,XX @@
447
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps arm_sysemu_ops = {
616
#include "qapi/error.h"
617
#include "qapi/visitor.h"
618
#include "cpu.h"
619
+#ifdef CONFIG_TCG
620
+#include "hw/core/tcg-cpu-ops.h"
621
+#endif /* CONFIG_TCG */
622
#include "internals.h"
623
#include "exec/exec-all.h"
624
#include "hw/qdev-properties.h"
625
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_set_pc(CPUState *cs, vaddr value)
626
}
627
628
#ifdef CONFIG_TCG
448
#ifdef CONFIG_TCG
629
-static void arm_cpu_synchronize_from_tb(CPUState *cs,
449
static const TCGCPUOps arm_tcg_ops = {
630
- const TranslationBlock *tb)
450
.initialize = arm_translate_init,
631
+void arm_cpu_synchronize_from_tb(CPUState *cs,
451
+ .translate_code = arm_translate_code,
632
+ const TranslationBlock *tb)
452
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
633
{
453
.debug_excp_handler = arm_debug_excp_handler,
634
ARMCPU *cpu = ARM_CPU(cs);
454
.restore_state_to_opc = arm_restore_state_to_opc,
635
CPUARMState *env = &cpu->env;
455
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
636
@@ -XXX,XX +XXX,XX @@ bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
456
index XXXXXXX..XXXXXXX 100644
637
found:
457
--- a/target/arm/tcg/cpu-v7m.c
638
cs->exception_index = excp_idx;
458
+++ b/target/arm/tcg/cpu-v7m.c
639
env->exception.target_el = target_el;
459
@@ -XXX,XX +XXX,XX @@ static void cortex_m55_initfn(Object *obj)
640
- cc->tcg_ops.do_interrupt(cs);
460
641
+ cc->tcg_ops->do_interrupt(cs);
461
static const TCGCPUOps arm_v7m_tcg_ops = {
642
return true;
462
.initialize = arm_translate_init,
643
}
463
+ .translate_code = arm_translate_code,
644
464
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
645
@@ -XXX,XX +XXX,XX @@ static gchar *arm_gdb_arch_name(CPUState *cs)
465
.debug_excp_handler = arm_debug_excp_handler,
646
return g_strdup("arm");
466
.restore_state_to_opc = arm_restore_state_to_opc,
647
}
467
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
648
468
index XXXXXXX..XXXXXXX 100644
649
+#ifdef CONFIG_TCG
469
--- a/target/arm/tcg/translate.c
650
+static struct TCGCPUOps arm_tcg_ops = {
470
+++ b/target/arm/tcg/translate.c
651
+ .initialize = arm_translate_init,
471
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps thumb_translator_ops = {
652
+ .synchronize_from_tb = arm_cpu_synchronize_from_tb,
472
.tb_stop = arm_tr_tb_stop,
653
+ .cpu_exec_interrupt = arm_cpu_exec_interrupt,
473
};
654
+ .tlb_fill = arm_cpu_tlb_fill,
474
655
+ .debug_excp_handler = arm_debug_excp_handler,
475
-/* generate intermediate code for basic block 'tb'. */
656
+
476
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
657
+#if !defined(CONFIG_USER_ONLY)
477
- vaddr pc, void *host_pc)
658
+ .do_interrupt = arm_cpu_do_interrupt,
478
+void arm_translate_code(CPUState *cpu, TranslationBlock *tb,
659
+ .do_transaction_failed = arm_cpu_do_transaction_failed,
479
+ int *max_insns, vaddr pc, void *host_pc)
660
+ .do_unaligned_access = arm_cpu_do_unaligned_access,
480
{
661
+ .adjust_watchpoint_address = arm_adjust_watchpoint_address,
481
DisasContext dc = { };
662
+ .debug_check_watchpoint = arm_debug_check_watchpoint,
482
const TranslatorOps *ops = &arm_translator_ops;
663
+#endif /* !CONFIG_USER_ONLY */
664
+};
665
+#endif /* CONFIG_TCG */
666
+
667
static void arm_cpu_class_init(ObjectClass *oc, void *data)
668
{
669
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
670
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
671
cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
672
cc->gdb_stop_before_watchpoint = true;
673
cc->disas_set_info = arm_disas_set_info;
674
+
675
#ifdef CONFIG_TCG
676
- cc->tcg_ops.initialize = arm_translate_init;
677
- cc->tcg_ops.cpu_exec_interrupt = arm_cpu_exec_interrupt;
678
- cc->tcg_ops.synchronize_from_tb = arm_cpu_synchronize_from_tb;
679
- cc->tcg_ops.tlb_fill = arm_cpu_tlb_fill;
680
- cc->tcg_ops.debug_excp_handler = arm_debug_excp_handler;
681
-#if !defined(CONFIG_USER_ONLY)
682
- cc->tcg_ops.do_interrupt = arm_cpu_do_interrupt;
683
- cc->tcg_ops.do_transaction_failed = arm_cpu_do_transaction_failed;
684
- cc->tcg_ops.do_unaligned_access = arm_cpu_do_unaligned_access;
685
- cc->tcg_ops.adjust_watchpoint_address = arm_adjust_watchpoint_address;
686
- cc->tcg_ops.debug_check_watchpoint = arm_debug_check_watchpoint;
687
-#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
688
+ cc->tcg_ops = &arm_tcg_ops;
689
#endif /* CONFIG_TCG */
690
}
691
692
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
693
index XXXXXXX..XXXXXXX 100644
694
--- a/target/arm/cpu64.c
695
+++ b/target/arm/cpu64.c
696
@@ -XXX,XX +XXX,XX @@
697
#include "qemu/osdep.h"
698
#include "qapi/error.h"
699
#include "cpu.h"
700
+#ifdef CONFIG_TCG
701
+#include "hw/core/tcg-cpu-ops.h"
702
+#endif /* CONFIG_TCG */
703
#include "qemu/module.h"
704
#if !defined(CONFIG_USER_ONLY)
705
#include "hw/loader.h"
706
@@ -XXX,XX +XXX,XX @@ static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
707
{
708
CPUClass *cc = CPU_CLASS(oc);
709
710
-#ifdef CONFIG_TCG
711
- cc->tcg_ops.cpu_exec_interrupt = arm_cpu_exec_interrupt;
712
-#endif /* CONFIG_TCG */
713
-
714
cc->gdb_read_register = aarch64_cpu_gdb_read_register;
715
cc->gdb_write_register = aarch64_cpu_gdb_write_register;
716
cc->gdb_num_core_regs = 34;
717
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
718
index XXXXXXX..XXXXXXX 100644
719
--- a/target/arm/cpu_tcg.c
720
+++ b/target/arm/cpu_tcg.c
721
@@ -XXX,XX +XXX,XX @@
722
723
#include "qemu/osdep.h"
724
#include "cpu.h"
725
+#ifdef CONFIG_TCG
726
+#include "hw/core/tcg-cpu-ops.h"
727
+#endif /* CONFIG_TCG */
728
#include "internals.h"
729
730
/* CPU models. These are not needed for the AArch64 linux-user build. */
731
@@ -XXX,XX +XXX,XX @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
732
if (interrupt_request & CPU_INTERRUPT_HARD
733
&& (armv7m_nvic_can_take_pending_exception(env->nvic))) {
734
cs->exception_index = EXCP_IRQ;
735
- cc->tcg_ops.do_interrupt(cs);
736
+ cc->tcg_ops->do_interrupt(cs);
737
ret = true;
738
}
739
return ret;
740
@@ -XXX,XX +XXX,XX @@ static void pxa270c5_initfn(Object *obj)
741
cpu->reset_sctlr = 0x00000078;
742
}
743
744
+#ifdef CONFIG_TCG
745
+static struct TCGCPUOps arm_v7m_tcg_ops = {
746
+ .initialize = arm_translate_init,
747
+ .synchronize_from_tb = arm_cpu_synchronize_from_tb,
748
+ .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
749
+ .tlb_fill = arm_cpu_tlb_fill,
750
+ .debug_excp_handler = arm_debug_excp_handler,
751
+
752
+#if !defined(CONFIG_USER_ONLY)
753
+ .do_interrupt = arm_v7m_cpu_do_interrupt,
754
+ .do_transaction_failed = arm_cpu_do_transaction_failed,
755
+ .do_unaligned_access = arm_cpu_do_unaligned_access,
756
+ .adjust_watchpoint_address = arm_adjust_watchpoint_address,
757
+ .debug_check_watchpoint = arm_debug_check_watchpoint,
758
+#endif /* !CONFIG_USER_ONLY */
759
+};
760
+#endif /* CONFIG_TCG */
761
+
762
static void arm_v7m_class_init(ObjectClass *oc, void *data)
763
{
764
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
765
@@ -XXX,XX +XXX,XX @@ static void arm_v7m_class_init(ObjectClass *oc, void *data)
766
767
acc->info = data;
768
#ifdef CONFIG_TCG
769
- cc->tcg_ops.cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
770
-#ifndef CONFIG_USER_ONLY
771
- cc->tcg_ops.do_interrupt = arm_v7m_cpu_do_interrupt;
772
-#endif
773
+ cc->tcg_ops = &arm_v7m_tcg_ops;
774
#endif /* CONFIG_TCG */
775
776
cc->gdb_core_xml_file = "arm-m-profile.xml";
777
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_dump_state(CPUState *cs, FILE *f, int flags)
qemu_fprintf(f, "\n");
}

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps avr_tcg_ops = {
+ .initialize = avr_cpu_tcg_init,
+ .synchronize_from_tb = avr_cpu_synchronize_from_tb,
+ .cpu_exec_interrupt = avr_cpu_exec_interrupt,
+ .tlb_fill = avr_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+ .do_interrupt = avr_cpu_do_interrupt,
+#endif /* !CONFIG_USER_ONLY */
+};
+
static void avr_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = avr_cpu_class_by_name;

cc->has_work = avr_cpu_has_work;
- cc->tcg_ops.do_interrupt = avr_cpu_do_interrupt;
- cc->tcg_ops.cpu_exec_interrupt = avr_cpu_exec_interrupt;
cc->dump_state = avr_cpu_dump_state;
cc->set_pc = avr_cpu_set_pc;
cc->memory_rw_debug = avr_cpu_memory_rw_debug;
cc->get_phys_page_debug = avr_cpu_get_phys_page_debug;
- cc->tcg_ops.tlb_fill = avr_cpu_tlb_fill;
cc->vmsd = &vms_avr_cpu;
cc->disas_set_info = avr_cpu_disas_set_info;
- cc->tcg_ops.initialize = avr_cpu_tcg_init;
- cc->tcg_ops.synchronize_from_tb = avr_cpu_synchronize_from_tb;
cc->gdb_read_register = avr_cpu_gdb_read_register;
cc->gdb_write_register = avr_cpu_gdb_write_register;
cc->gdb_num_core_regs = 35;
cc->gdb_core_xml_file = "avr-cpu.xml";
+ cc->tcg_ops = &avr_tcg_ops;
}

/*
diff --git a/target/avr/helper.c b/target/avr/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/helper.c
+++ b/target/avr/helper.c
@@ -XXX,XX +XXX,XX @@

#include "qemu/osdep.h"
#include "cpu.h"
+#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/address-spaces.h"
#include "exec/helper-proto.h"
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
if (interrupt_request & CPU_INTERRUPT_RESET) {
if (cpu_interrupts_enabled(env)) {
cs->exception_index = EXCP_RESET;
- cc->tcg_ops.do_interrupt(cs);
+ cc->tcg_ops->do_interrupt(cs);

cs->interrupt_request &= ~CPU_INTERRUPT_RESET;

@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
if (cpu_interrupts_enabled(env) && env->intsrc != 0) {
int index = ctz32(env->intsrc);
cs->exception_index = EXCP_INT(index);
- cc->tcg_ops.do_interrupt(cs);
+ cc->tcg_ops->do_interrupt(cs);

env->intsrc &= env->intsrc - 1; /* clear the interrupt */
cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps avr_sysemu_ops = {

static const TCGCPUOps avr_tcg_ops = {
.initialize = avr_cpu_tcg_init,
+ .translate_code = avr_cpu_translate_code,
.synchronize_from_tb = avr_cpu_synchronize_from_tb,
.restore_state_to_opc = avr_restore_state_to_opc,
.cpu_exec_interrupt = avr_cpu_exec_interrupt,
diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static bool trans_WDR(DisasContext *ctx, arg_WDR *a)
*
* - translate()
* - canonicalize_skip()
- * - gen_intermediate_code()
+ * - translate_code()
* - restore_state_to_opc()
*
*/
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps avr_tr_ops = {
.tb_stop = avr_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void avr_cpu_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = { };
translator_loop(cs, tb, max_insns, pc, host_pc, &avr_tr_ops, &dc.base);
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/cpu.c
+++ b/target/hexagon/cpu.c
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_init(Object *obj)

static const TCGCPUOps hexagon_tcg_ops = {
.initialize = hexagon_translate_init,
+ .translate_code = hexagon_translate_code,
.synchronize_from_tb = hexagon_cpu_synchronize_from_tb,
.restore_state_to_opc = hexagon_restore_state_to_opc,
};
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hexagon/translate.c
+++ b/target/hexagon/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
.tb_stop = hexagon_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void hexagon_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
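
The translate_code side of the series follows an equally fixed recipe, visible in the avr and hexagon hunks above: the old global gen_intermediate_code() entry point becomes a per-target function with the same signature, registered through TCGCPUOps. A sketch of the resulting wiring, with hypothetical foo_* names and the real TranslatorOps callbacks elided:

    static const TranslatorOps foo_tr_ops = {
        /* .init_disas_context, .translate_insn, .tb_stop, ... */
    };

    /* Registered as TCGCPUOps.translate_code and invoked by the common
     * translator instead of a hard-coded gen_intermediate_code(). */
    void foo_translate_code(CPUState *cs, TranslationBlock *tb,
                            int *max_insns, vaddr pc, void *host_pc)
    {
        DisasContext dc = { };

        translator_loop(cs, tb, max_insns, pc, host_pc, &foo_tr_ops, &dc.base);
    }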
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/cpu.c
+++ b/target/cris/cpu.c
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_initfn(Object *obj)
#endif
}

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps crisv10_tcg_ops = {
+ .initialize = cris_initialize_crisv10_tcg,
+ .cpu_exec_interrupt = cris_cpu_exec_interrupt,
+ .tlb_fill = cris_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+ .do_interrupt = crisv10_cpu_do_interrupt,
+#endif /* !CONFIG_USER_ONLY */
+};
+
+static struct TCGCPUOps crisv32_tcg_ops = {
+ .initialize = cris_initialize_tcg,
+ .cpu_exec_interrupt = cris_cpu_exec_interrupt,
+ .tlb_fill = cris_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+ .do_interrupt = cris_cpu_do_interrupt,
+#endif /* !CONFIG_USER_ONLY */
+};
+
static void crisv8_cpu_class_init(ObjectClass *oc, void *data)
{
CPUClass *cc = CPU_CLASS(oc);
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);

ccc->vr = 8;
- cc->tcg_ops.do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
- cc->tcg_ops.initialize = cris_initialize_crisv10_tcg;
+ cc->tcg_ops = &crisv10_tcg_ops;
}

static void crisv9_cpu_class_init(ObjectClass *oc, void *data)
@@ -XXX,XX +XXX,XX @@ static void crisv9_cpu_class_init(ObjectClass *oc, void *data)
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);

ccc->vr = 9;
- cc->tcg_ops.do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
- cc->tcg_ops.initialize = cris_initialize_crisv10_tcg;
+ cc->tcg_ops = &crisv10_tcg_ops;
}

static void crisv10_cpu_class_init(ObjectClass *oc, void *data)
@@ -XXX,XX +XXX,XX @@ static void crisv10_cpu_class_init(ObjectClass *oc, void *data)
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);

ccc->vr = 10;
- cc->tcg_ops.do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
- cc->tcg_ops.initialize = cris_initialize_crisv10_tcg;
+ cc->tcg_ops = &crisv10_tcg_ops;
}

static void crisv11_cpu_class_init(ObjectClass *oc, void *data)
@@ -XXX,XX +XXX,XX @@ static void crisv11_cpu_class_init(ObjectClass *oc, void *data)
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);

ccc->vr = 11;
- cc->tcg_ops.do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
- cc->tcg_ops.initialize = cris_initialize_crisv10_tcg;
+ cc->tcg_ops = &crisv10_tcg_ops;
}

static void crisv17_cpu_class_init(ObjectClass *oc, void *data)
@@ -XXX,XX +XXX,XX @@ static void crisv17_cpu_class_init(ObjectClass *oc, void *data)
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);

ccc->vr = 17;
- cc->tcg_ops.do_interrupt = crisv10_cpu_do_interrupt;
cc->gdb_read_register = crisv10_cpu_gdb_read_register;
- cc->tcg_ops.initialize = cris_initialize_crisv10_tcg;
+ cc->tcg_ops = &crisv10_tcg_ops;
}

static void crisv32_cpu_class_init(ObjectClass *oc, void *data)
{
+ CPUClass *cc = CPU_CLASS(oc);
CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);

ccc->vr = 32;
+ cc->tcg_ops = &crisv32_tcg_ops;
}

static void cris_cpu_class_init(ObjectClass *oc, void *data)
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = cris_cpu_class_by_name;
cc->has_work = cris_cpu_has_work;
- cc->tcg_ops.do_interrupt = cris_cpu_do_interrupt;
- cc->tcg_ops.cpu_exec_interrupt = cris_cpu_exec_interrupt;
cc->dump_state = cris_cpu_dump_state;
cc->set_pc = cris_cpu_set_pc;
cc->gdb_read_register = cris_cpu_gdb_read_register;
cc->gdb_write_register = cris_cpu_gdb_write_register;
- cc->tcg_ops.tlb_fill = cris_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = cris_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_cris_cpu;
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_stop_before_watchpoint = true;

cc->disas_set_info = cris_disas_set_info;
- cc->tcg_ops.initialize = cris_initialize_tcg;
}

#define DEFINE_CRIS_CPU_TYPE(cpu_model, initfn) \
diff --git a/target/cris/helper.c b/target/cris/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/helper.c
+++ b/target/cris/helper.c
@@ -XXX,XX +XXX,XX @@

#include "qemu/osdep.h"
#include "cpu.h"
+#include "hw/core/tcg-cpu-ops.h"
#include "mmu.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
@@ -XXX,XX +XXX,XX @@ bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
&& (env->pregs[PR_CCS] & I_FLAG)
&& !env->locked_irq) {
cs->exception_index = EXCP_IRQ;
- cc->tcg_ops.do_interrupt(cs);
+ cc->tcg_ops->do_interrupt(cs);
ret = true;
}
if (interrupt_request & CPU_INTERRUPT_NMI) {
@@ -XXX,XX +XXX,XX @@ bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
if ((env->pregs[PR_CCS] & m_flag_archval)) {
cs->exception_index = EXCP_NMI;
- cc->tcg_ops.do_interrupt(cs);
+ cc->tcg_ops->do_interrupt(cs);
ret = true;
}
}
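
On the consumer side, every call site switches from the embedded struct to a pointer dereference, as the avr and cris helpers above show. The call-site pattern, sketched with a generic handler (assuming the usual QEMU CPU_GET_CLASS accessor; names are placeholders):

    static bool foo_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
    {
        CPUClass *cc = CPU_GET_CLASS(cs);

        if (interrupt_request & CPU_INTERRUPT_HARD) {
            cs->exception_index = EXCP_IRQ;    /* target-specific number */
            cc->tcg_ops->do_interrupt(cs);     /* was: cc->tcg_ops.do_interrupt(cs) */
            return true;
        }
        return false;
    }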
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -XXX,XX +XXX,XX @@ static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
return object_class_by_name(TYPE_HPPA_CPU);
}

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps hppa_tcg_ops = {
+ .initialize = hppa_translate_init,
+ .synchronize_from_tb = hppa_cpu_synchronize_from_tb,
+ .cpu_exec_interrupt = hppa_cpu_exec_interrupt,
+ .tlb_fill = hppa_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+ .do_interrupt = hppa_cpu_do_interrupt,
+ .do_unaligned_access = hppa_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+
static void hppa_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = hppa_cpu_class_by_name;
cc->has_work = hppa_cpu_has_work;
- cc->tcg_ops.do_interrupt = hppa_cpu_do_interrupt;
- cc->tcg_ops.cpu_exec_interrupt = hppa_cpu_exec_interrupt;
cc->dump_state = hppa_cpu_dump_state;
cc->set_pc = hppa_cpu_set_pc;
- cc->tcg_ops.synchronize_from_tb = hppa_cpu_synchronize_from_tb;
cc->gdb_read_register = hppa_cpu_gdb_read_register;
cc->gdb_write_register = hppa_cpu_gdb_write_register;
- cc->tcg_ops.tlb_fill = hppa_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = hppa_cpu_get_phys_page_debug;
- cc->tcg_ops.do_unaligned_access = hppa_cpu_do_unaligned_access;
dc->vmsd = &vmstate_hppa_cpu;
#endif
cc->disas_set_info = hppa_cpu_disas_set_info;
- cc->tcg_ops.initialize = hppa_translate_init;
-
cc->gdb_num_core_regs = 128;
+ cc->tcg_ops = &hppa_tcg_ops;
}

static const TypeInfo hppa_cpu_type_info = {
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps hppa_sysemu_ops = {

static const TCGCPUOps hppa_tcg_ops = {
.initialize = hppa_translate_init,
+ .translate_code = hppa_translate_code,
.synchronize_from_tb = hppa_cpu_synchronize_from_tb,
.restore_state_to_opc = hppa_restore_state_to_opc,

diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
#endif
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx = { };
translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_synchronize_from_tb(CPUState *cs,
cpu->env.eip = tb->pc - tb->cs_base;
}

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps x86_tcg_ops = {
+ .initialize = tcg_x86_init,
+ .synchronize_from_tb = x86_cpu_synchronize_from_tb,
+ .cpu_exec_enter = x86_cpu_exec_enter,
+ .cpu_exec_exit = x86_cpu_exec_exit,
+ .cpu_exec_interrupt = x86_cpu_exec_interrupt,
+ .do_interrupt = x86_cpu_do_interrupt,
+ .tlb_fill = x86_cpu_tlb_fill,
+#ifndef CONFIG_USER_ONLY
+ .debug_excp_handler = breakpoint_handler,
+#endif /* !CONFIG_USER_ONLY */
+};
+
void tcg_cpu_common_class_init(CPUClass *cc)
{
- cc->tcg_ops.do_interrupt = x86_cpu_do_interrupt;
- cc->tcg_ops.cpu_exec_interrupt = x86_cpu_exec_interrupt;
- cc->tcg_ops.synchronize_from_tb = x86_cpu_synchronize_from_tb;
- cc->tcg_ops.cpu_exec_enter = x86_cpu_exec_enter;
- cc->tcg_ops.cpu_exec_exit = x86_cpu_exec_exit;
- cc->tcg_ops.initialize = tcg_x86_init;
- cc->tcg_ops.tlb_fill = x86_cpu_tlb_fill;
-#ifndef CONFIG_USER_ONLY
- cc->tcg_ops.debug_excp_handler = breakpoint_handler;
-#endif
+ cc->tcg_ops = &x86_tcg_ops;
}
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static bool x86_debug_check_breakpoint(CPUState *cs)

static const TCGCPUOps x86_tcg_ops = {
.initialize = tcg_x86_init,
+ .translate_code = x86_translate_code,
.synchronize_from_tb = x86_cpu_synchronize_from_tb,
.restore_state_to_opc = x86_restore_state_to_opc,
.cpu_exec_enter = x86_cpu_exec_enter,
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
.tb_stop = i386_tr_tb_stop,
};

-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void x86_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/cpu.c
+++ b/target/loongarch/cpu.c
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)

static const TCGCPUOps loongarch_tcg_ops = {
.initialize = loongarch_translate_init,
+ .translate_code = loongarch_translate_code,
.synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
.restore_state_to_opc = loongarch_restore_state_to_opc,

diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/loongarch/tcg/translate.c
+++ b/target/loongarch/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
.tb_stop = loongarch_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void loongarch_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/lm32/cpu.c b/target/lm32/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/lm32/cpu.c
+++ b/target/lm32/cpu.c
@@ -XXX,XX +XXX,XX @@ static ObjectClass *lm32_cpu_class_by_name(const char *cpu_model)
return oc;
}

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps lm32_tcg_ops = {
+ .initialize = lm32_translate_init,
+ .cpu_exec_interrupt = lm32_cpu_exec_interrupt,
+ .tlb_fill = lm32_cpu_tlb_fill,
+ .debug_excp_handler = lm32_debug_excp_handler,
+
+#ifndef CONFIG_USER_ONLY
+ .do_interrupt = lm32_cpu_do_interrupt,
+#endif /* !CONFIG_USER_ONLY */
+};
+
static void lm32_cpu_class_init(ObjectClass *oc, void *data)
{
LM32CPUClass *lcc = LM32_CPU_CLASS(oc);
@@ -XXX,XX +XXX,XX @@ static void lm32_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = lm32_cpu_class_by_name;
cc->has_work = lm32_cpu_has_work;
- cc->tcg_ops.do_interrupt = lm32_cpu_do_interrupt;
- cc->tcg_ops.cpu_exec_interrupt = lm32_cpu_exec_interrupt;
cc->dump_state = lm32_cpu_dump_state;
cc->set_pc = lm32_cpu_set_pc;
cc->gdb_read_register = lm32_cpu_gdb_read_register;
cc->gdb_write_register = lm32_cpu_gdb_write_register;
- cc->tcg_ops.tlb_fill = lm32_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = lm32_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_lm32_cpu;
#endif
cc->gdb_num_core_regs = 32 + 7;
cc->gdb_stop_before_watchpoint = true;
- cc->tcg_ops.debug_excp_handler = lm32_debug_excp_handler;
cc->disas_set_info = lm32_cpu_disas_set_info;
- cc->tcg_ops.initialize = lm32_translate_init;
+ cc->tcg_ops = &lm32_tcg_ops;
}

#define DEFINE_LM32_CPU_TYPE(cpu_model, initfn) \
628
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
1138
index XXXXXXX..XXXXXXX 100644
629
index XXXXXXX..XXXXXXX 100644
1139
--- a/target/m68k/cpu.c
630
--- a/target/m68k/cpu.c
1140
+++ b/target/m68k/cpu.c
631
+++ b/target/m68k/cpu.c
1141
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_m68k_cpu = {
632
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps m68k_sysemu_ops = {
1142
};
633
1143
#endif
634
static const TCGCPUOps m68k_tcg_ops = {
1144
635
.initialize = m68k_tcg_init,
1145
+#include "hw/core/tcg-cpu-ops.h"
636
+ .translate_code = m68k_translate_code,
1146
+
637
.restore_state_to_opc = m68k_restore_state_to_opc,
1147
+static struct TCGCPUOps m68k_tcg_ops = {
638
1148
+ .initialize = m68k_tcg_init,
639
#ifndef CONFIG_USER_ONLY
1149
+ .cpu_exec_interrupt = m68k_cpu_exec_interrupt,
640
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
1150
+ .tlb_fill = m68k_cpu_tlb_fill,
641
index XXXXXXX..XXXXXXX 100644
1151
+
642
--- a/target/m68k/translate.c
1152
+#ifndef CONFIG_USER_ONLY
643
+++ b/target/m68k/translate.c
1153
+ .do_interrupt = m68k_cpu_do_interrupt,
644
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
1154
+ .do_transaction_failed = m68k_cpu_transaction_failed,
645
.tb_stop = m68k_tr_tb_stop,
1155
+#endif /* !CONFIG_USER_ONLY */
646
};
1156
+};
647
1157
+
648
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
1158
static void m68k_cpu_class_init(ObjectClass *c, void *data)
649
- vaddr pc, void *host_pc)
1159
{
650
+void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
1160
M68kCPUClass *mcc = M68K_CPU_CLASS(c);
651
+ int *max_insns, vaddr pc, void *host_pc)
1161
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
652
{
1162
653
DisasContext dc;
1163
cc->class_by_name = m68k_cpu_class_by_name;
654
translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
1164
cc->has_work = m68k_cpu_has_work;
1165
- cc->tcg_ops.do_interrupt = m68k_cpu_do_interrupt;
1166
- cc->tcg_ops.cpu_exec_interrupt = m68k_cpu_exec_interrupt;
1167
cc->dump_state = m68k_cpu_dump_state;
1168
cc->set_pc = m68k_cpu_set_pc;
1169
cc->gdb_read_register = m68k_cpu_gdb_read_register;
1170
cc->gdb_write_register = m68k_cpu_gdb_write_register;
1171
- cc->tcg_ops.tlb_fill = m68k_cpu_tlb_fill;
1172
#if defined(CONFIG_SOFTMMU)
1173
- cc->tcg_ops.do_transaction_failed = m68k_cpu_transaction_failed;
1174
cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
1175
dc->vmsd = &vmstate_m68k_cpu;
1176
#endif
1177
cc->disas_set_info = m68k_cpu_disas_set_info;
1178
- cc->tcg_ops.initialize = m68k_tcg_init;
1179
1180
cc->gdb_num_core_regs = 18;
1181
+ cc->tcg_ops = &m68k_tcg_ops;
1182
}
1183
1184
static void m68k_cpu_class_init_cf_core(ObjectClass *c, void *data)
1185
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static ObjectClass *mb_cpu_class_by_name(const char *cpu_model)
return object_class_by_name(TYPE_MICROBLAZE_CPU);
}

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps mb_tcg_ops = {
+ .initialize = mb_tcg_init,
+ .synchronize_from_tb = mb_cpu_synchronize_from_tb,
+ .cpu_exec_interrupt = mb_cpu_exec_interrupt,
+ .tlb_fill = mb_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+ .do_interrupt = mb_cpu_do_interrupt,
+ .do_transaction_failed = mb_cpu_transaction_failed,
+ .do_unaligned_access = mb_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+
static void mb_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = mb_cpu_class_by_name;
cc->has_work = mb_cpu_has_work;
- cc->tcg_ops.do_interrupt = mb_cpu_do_interrupt;
- cc->tcg_ops.cpu_exec_interrupt = mb_cpu_exec_interrupt;
+
cc->dump_state = mb_cpu_dump_state;
cc->set_pc = mb_cpu_set_pc;
- cc->tcg_ops.synchronize_from_tb = mb_cpu_synchronize_from_tb;
cc->gdb_read_register = mb_cpu_gdb_read_register;
cc->gdb_write_register = mb_cpu_gdb_write_register;
- cc->tcg_ops.tlb_fill = mb_cpu_tlb_fill;
+
#ifndef CONFIG_USER_ONLY
- cc->tcg_ops.do_transaction_failed = mb_cpu_transaction_failed;
- cc->tcg_ops.do_unaligned_access = mb_cpu_do_unaligned_access;
cc->get_phys_page_attrs_debug = mb_cpu_get_phys_page_attrs_debug;
dc->vmsd = &vmstate_mb_cpu;
#endif
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_num_core_regs = 32 + 27;

cc->disas_set_info = mb_disas_set_info;
- cc->tcg_ops.initialize = mb_tcg_init;
+ cc->tcg_ops = &mb_tcg_ops;
}

static const TypeInfo mb_cpu_type_info = {
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps mb_sysemu_ops = {

static const TCGCPUOps mb_tcg_ops = {
.initialize = mb_tcg_init,
+ .translate_code = mb_translate_code,
.synchronize_from_tb = mb_cpu_synchronize_from_tb,
.restore_state_to_opc = mb_restore_state_to_opc,

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
.tb_stop = mb_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mb_translate_code(CPUState *cpu, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;
translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property mips_cpu_properties[] = {
DEFINE_PROP_END_OF_LIST()
};

+#ifdef CONFIG_TCG
+#include "hw/core/tcg-cpu-ops.h"
+/*
+ * NB: cannot be const, as some elements are changed for specific
+ * mips hardware (see hw/mips/jazz.c).
+ */
+static struct TCGCPUOps mips_tcg_ops = {
+ .initialize = mips_tcg_init,
+ .synchronize_from_tb = mips_cpu_synchronize_from_tb,
+ .cpu_exec_interrupt = mips_cpu_exec_interrupt,
+ .tlb_fill = mips_cpu_tlb_fill,
+
+#if !defined(CONFIG_USER_ONLY)
+ .do_interrupt = mips_cpu_do_interrupt,
+ .do_transaction_failed = mips_cpu_do_transaction_failed,
+ .do_unaligned_access = mips_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+#endif /* CONFIG_TCG */
+
static void mips_cpu_class_init(ObjectClass *c, void *data)
{
MIPSCPUClass *mcc = MIPS_CPU_CLASS(c);
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->vmsd = &vmstate_mips_cpu;
#endif
cc->disas_set_info = mips_cpu_disas_set_info;
-#ifdef CONFIG_TCG
- cc->tcg_ops.initialize = mips_tcg_init;
- cc->tcg_ops.do_interrupt = mips_cpu_do_interrupt;
- cc->tcg_ops.cpu_exec_interrupt = mips_cpu_exec_interrupt;
- cc->tcg_ops.synchronize_from_tb = mips_cpu_synchronize_from_tb;
- cc->tcg_ops.tlb_fill = mips_cpu_tlb_fill;
-#ifndef CONFIG_USER_ONLY
- cc->tcg_ops.do_transaction_failed = mips_cpu_do_transaction_failed;
- cc->tcg_ops.do_unaligned_access = mips_cpu_do_unaligned_access;
-
-#endif /* CONFIG_USER_ONLY */
-#endif /* CONFIG_TCG */
-
cc->gdb_num_core_regs = 73;
cc->gdb_stop_before_watchpoint = true;
+#ifdef CONFIG_TCG
+ cc->tcg_ops = &mips_tcg_ops;
+#endif /* CONFIG_TCG */
}

static const TypeInfo mips_cpu_type_info = {
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static const Property mips_cpu_properties[] = {
#include "hw/core/tcg-cpu-ops.h"
static const TCGCPUOps mips_tcg_ops = {
.initialize = mips_tcg_init,
+ .translate_code = mips_translate_code,
.synchronize_from_tb = mips_cpu_synchronize_from_tb,
.restore_state_to_opc = mips_restore_state_to_opc,

diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/tcg/translate.c
+++ b/target/mips/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
.tb_stop = mips_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void mips_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/moxie/cpu.c b/target/moxie/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/moxie/cpu.c
+++ b/target/moxie/cpu.c
@@ -XXX,XX +XXX,XX @@ static ObjectClass *moxie_cpu_class_by_name(const char *cpu_model)
return oc;
}

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps moxie_tcg_ops = {
+ .initialize = moxie_translate_init,
+ .tlb_fill = moxie_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+ .do_interrupt = moxie_cpu_do_interrupt,
+#endif /* !CONFIG_USER_ONLY */
+};
+
static void moxie_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -XXX,XX +XXX,XX @@ static void moxie_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = moxie_cpu_class_by_name;

cc->has_work = moxie_cpu_has_work;
- cc->tcg_ops.do_interrupt = moxie_cpu_do_interrupt;
cc->dump_state = moxie_cpu_dump_state;
cc->set_pc = moxie_cpu_set_pc;
- cc->tcg_ops.tlb_fill = moxie_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = moxie_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_moxie_cpu;
#endif
cc->disas_set_info = moxie_cpu_disas_set_info;
- cc->tcg_ops.initialize = moxie_translate_init;
+ cc->tcg_ops = &moxie_tcg_ops;
}

static void moxielite_initfn(Object *obj)
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property nios2_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps nios2_tcg_ops = {
+ .initialize = nios2_tcg_init,
+ .cpu_exec_interrupt = nios2_cpu_exec_interrupt,
+ .tlb_fill = nios2_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+ .do_interrupt = nios2_cpu_do_interrupt,
+ .do_unaligned_access = nios2_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};

static void nios2_cpu_class_init(ObjectClass *oc, void *data)
{
@@ -XXX,XX +XXX,XX @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = nios2_cpu_class_by_name;
cc->has_work = nios2_cpu_has_work;
- cc->tcg_ops.do_interrupt = nios2_cpu_do_interrupt;
- cc->tcg_ops.cpu_exec_interrupt = nios2_cpu_exec_interrupt;
cc->dump_state = nios2_cpu_dump_state;
cc->set_pc = nios2_cpu_set_pc;
cc->disas_set_info = nios2_cpu_disas_set_info;
- cc->tcg_ops.tlb_fill = nios2_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
- cc->tcg_ops.do_unaligned_access = nios2_cpu_do_unaligned_access;
cc->get_phys_page_debug = nios2_cpu_get_phys_page_debug;
#endif
cc->gdb_read_register = nios2_cpu_gdb_read_register;
cc->gdb_write_register = nios2_cpu_gdb_write_register;
cc->gdb_num_core_regs = 49;
- cc->tcg_ops.initialize = nios2_tcg_init;
+ cc->tcg_ops = &nios2_tcg_ops;
}

static const TypeInfo nios2_cpu_type_info = {
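
The mips table is the one deliberate exception to the pattern: the NB comment above leaves it non-const because hw/mips/jazz.c replaces individual hooks for that machine. The jazz.c side is not part of this series; purely as an illustration of why const would not work there (every name except mips_tcg_ops is invented, and the symbol's visibility is assumed for the sketch):

    /* Sketch only: a board-level override of one hook at machine-init
     * time. Legal only because mips_tcg_ops is a mutable struct. */
    extern struct TCGCPUOps mips_tcg_ops;

    static void jazz_install_bus_error_hook(void)
    {
        mips_tcg_ops.do_transaction_failed = jazz_do_transaction_failed;
    }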
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void openrisc_any_initfn(Object *obj)
| (IMMUCFGR_NTS & (ctz32(TLB_SIZE) << 2));
}

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps openrisc_tcg_ops = {
+ .initialize = openrisc_translate_init,
+ .cpu_exec_interrupt = openrisc_cpu_exec_interrupt,
+ .tlb_fill = openrisc_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+ .do_interrupt = openrisc_cpu_do_interrupt,
+#endif /* !CONFIG_USER_ONLY */
+};
+
static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
{
OpenRISCCPUClass *occ = OPENRISC_CPU_CLASS(oc);
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = openrisc_cpu_class_by_name;
cc->has_work = openrisc_cpu_has_work;
- cc->tcg_ops.do_interrupt = openrisc_cpu_do_interrupt;
- cc->tcg_ops.cpu_exec_interrupt = openrisc_cpu_exec_interrupt;
cc->dump_state = openrisc_cpu_dump_state;
cc->set_pc = openrisc_cpu_set_pc;
cc->gdb_read_register = openrisc_cpu_gdb_read_register;
cc->gdb_write_register = openrisc_cpu_gdb_write_register;
- cc->tcg_ops.tlb_fill = openrisc_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = openrisc_cpu_get_phys_page_debug;
dc->vmsd = &vmstate_openrisc_cpu;
#endif
cc->gdb_num_core_regs = 32 + 3;
- cc->tcg_ops.initialize = openrisc_translate_init;
cc->disas_set_info = openrisc_disas_set_info;
+ cc->tcg_ops = &openrisc_tcg_ops;
}

/* Sort alphabetically by type name, except for "any". */
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps openrisc_sysemu_ops = {

static const TCGCPUOps openrisc_tcg_ops = {
.initialize = openrisc_translate_init,
+ .translate_code = openrisc_translate_code,
.synchronize_from_tb = openrisc_cpu_synchronize_from_tb,
.restore_state_to_opc = openrisc_restore_state_to_opc,

diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/translate.c
+++ b/target/openrisc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
.tb_stop = openrisc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void openrisc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps ppc_sysemu_ops = {

static const TCGCPUOps ppc_tcg_ops = {
.initialize = ppc_translate_init,
+ .translate_code = ppc_translate_code,
.restore_state_to_opc = ppc_restore_state_to_opc,

#ifdef CONFIG_USER_ONLY
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
.tb_stop = ppc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
return NULL;
}

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps riscv_tcg_ops = {
+ .initialize = riscv_translate_init,
+ .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
+ .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
+ .tlb_fill = riscv_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+ .do_interrupt = riscv_cpu_do_interrupt,
+ .do_transaction_failed = riscv_cpu_do_transaction_failed,
+ .do_unaligned_access = riscv_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)

cc->class_by_name = riscv_cpu_class_by_name;
cc->has_work = riscv_cpu_has_work;
- cc->tcg_ops.do_interrupt = riscv_cpu_do_interrupt;
- cc->tcg_ops.cpu_exec_interrupt = riscv_cpu_exec_interrupt;
cc->dump_state = riscv_cpu_dump_state;
cc->set_pc = riscv_cpu_set_pc;
- cc->tcg_ops.synchronize_from_tb = riscv_cpu_synchronize_from_tb;
cc->gdb_read_register = riscv_cpu_gdb_read_register;
cc->gdb_write_register = riscv_cpu_gdb_write_register;
cc->gdb_num_core_regs = 33;
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
cc->gdb_stop_before_watchpoint = true;
cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
- cc->tcg_ops.do_transaction_failed = riscv_cpu_do_transaction_failed;
- cc->tcg_ops.do_unaligned_access = riscv_cpu_do_unaligned_access;
cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
/* For now, mark unmigratable: */
cc->vmsd = &vmstate_riscv_cpu;
#endif
cc->gdb_arch_name = riscv_gdb_arch_name;
cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
- cc->tcg_ops.initialize = riscv_translate_init;
- cc->tcg_ops.tlb_fill = riscv_cpu_tlb_fill;
+ cc->tcg_ops = &riscv_tcg_ops;

device_class_set_props(dc, riscv_cpu_properties);
}
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_restore_state_to_opc(CPUState *cs,

static const TCGCPUOps riscv_tcg_ops = {
.initialize = riscv_translate_init,
+ .translate_code = riscv_translate_code,
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
.restore_state_to_opc = riscv_restore_state_to_opc,

diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
.tb_stop = riscv_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_init(Object *obj)
qdev_init_gpio_in(DEVICE(cpu), rx_cpu_set_irq, 2);
}

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps rx_tcg_ops = {
+ .initialize = rx_translate_init,
+ .synchronize_from_tb = rx_cpu_synchronize_from_tb,
+ .cpu_exec_interrupt = rx_cpu_exec_interrupt,
+ .tlb_fill = rx_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+ .do_interrupt = rx_cpu_do_interrupt,
+#endif /* !CONFIG_USER_ONLY */
+};
+
static void rx_cpu_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)

cc->class_by_name = rx_cpu_class_by_name;
cc->has_work = rx_cpu_has_work;
- cc->tcg_ops.do_interrupt = rx_cpu_do_interrupt;
- cc->tcg_ops.cpu_exec_interrupt = rx_cpu_exec_interrupt;
cc->dump_state = rx_cpu_dump_state;
cc->set_pc = rx_cpu_set_pc;
- cc->tcg_ops.synchronize_from_tb = rx_cpu_synchronize_from_tb;
+
cc->gdb_read_register = rx_cpu_gdb_read_register;
cc->gdb_write_register = rx_cpu_gdb_write_register;
cc->get_phys_page_debug = rx_cpu_get_phys_page_debug;
cc->disas_set_info = rx_cpu_disas_set_info;
- cc->tcg_ops.initialize = rx_translate_init;
- cc->tcg_ops.tlb_fill = rx_cpu_tlb_fill;

cc->gdb_num_core_regs = 26;
cc->gdb_core_xml_file = "rx-core.xml";
+ cc->tcg_ops = &rx_tcg_ops;
}

static const TypeInfo rx_cpu_info = {
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {

static const TCGCPUOps rx_tcg_ops = {
.initialize = rx_translate_init,
+ .translate_code = rx_translate_code,
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
.restore_state_to_opc = rx_restore_state_to_opc,
.tlb_fill = rx_cpu_tlb_fill,
diff --git a/target/rx/translate.c b/target/rx/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/translate.c
+++ b/target/rx/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
.tb_stop = rx_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void rx_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_reset_full(DeviceState *dev)
return s390_cpu_reset(s, S390_CPU_RESET_CLEAR);
}

+#ifdef CONFIG_TCG
+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps s390_tcg_ops = {
+ .initialize = s390x_translate_init,
+ .tlb_fill = s390_cpu_tlb_fill,
+
+#if !defined(CONFIG_USER_ONLY)
+ .cpu_exec_interrupt = s390_cpu_exec_interrupt,
+ .do_interrupt = s390_cpu_do_interrupt,
+ .debug_excp_handler = s390x_cpu_debug_excp_handler,
+ .do_unaligned_access = s390x_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+#endif /* CONFIG_TCG */
+
static void s390_cpu_class_init(ObjectClass *oc, void *data)
{
S390CPUClass *scc = S390_CPU_CLASS(oc);
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
scc->reset = s390_cpu_reset;
cc->class_by_name = s390_cpu_class_by_name,
cc->has_work = s390_cpu_has_work;
-#ifdef CONFIG_TCG
- cc->tcg_ops.do_interrupt = s390_cpu_do_interrupt;
-#endif
cc->dump_state = s390_cpu_dump_state;
cc->set_pc = s390_cpu_set_pc;
cc->gdb_read_register = s390_cpu_gdb_read_register;
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
cc->vmsd = &vmstate_s390_cpu;
cc->get_crash_info = s390_cpu_get_crash_info;
cc->write_elf64_note = s390_cpu_write_elf64_note;
-#ifdef CONFIG_TCG
- cc->tcg_ops.cpu_exec_interrupt = s390_cpu_exec_interrupt;
- cc->tcg_ops.debug_excp_handler = s390x_cpu_debug_excp_handler;
- cc->tcg_ops.do_unaligned_access = s390x_cpu_do_unaligned_access;
-#endif
#endif
cc->disas_set_info = s390_cpu_disas_set_info;
-#ifdef CONFIG_TCG
- cc->tcg_ops.initialize = s390x_translate_init;
- cc->tcg_ops.tlb_fill = s390_cpu_tlb_fill;
-#endif
-
cc->gdb_num_core_regs = S390_NUM_CORE_REGS;
cc->gdb_core_xml_file = "s390x-core64.xml";
cc->gdb_arch_name = s390_gdb_arch_name;

s390_cpu_model_class_register_props(oc);
+
+#ifdef CONFIG_TCG
+ cc->tcg_ops = &s390_tcg_ops;
+#endif /* CONFIG_TCG */
}

static const TypeInfo s390_cpu_type_info = {
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,

static const TCGCPUOps s390_tcg_ops = {
.initialize = s390x_translate_init,
+ .translate_code = s390x_translate_code,
.restore_state_to_opc = s390x_restore_state_to_opc,

#ifdef CONFIG_USER_ONLY
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
.disas_log = s390x_tr_disas_log,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void s390x_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc;

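
Targets that can also be built without TCG (mips, s390x, sparc, ppc) keep both the table and its registration behind CONFIG_TCG, so a KVM-only binary compiles no TCGCPUOps at all. The guard reduces to this skeleton (foo_* names are placeholders):

    #ifdef CONFIG_TCG
    #include "hw/core/tcg-cpu-ops.h"

    static struct TCGCPUOps foo_tcg_ops = {
        /* ... hooks ... */
    };
    #endif /* CONFIG_TCG */

    static void foo_cpu_class_init(ObjectClass *oc, void *data)
    {
        CPUClass *cc = CPU_CLASS(oc);

    #ifdef CONFIG_TCG
        cc->tcg_ops = &foo_tcg_ops;
    #endif /* CONFIG_TCG */
    }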
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_sh_cpu = {
.unmigratable = 1,
};

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps superh_tcg_ops = {
+ .initialize = sh4_translate_init,
+ .synchronize_from_tb = superh_cpu_synchronize_from_tb,
+ .cpu_exec_interrupt = superh_cpu_exec_interrupt,
+ .tlb_fill = superh_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+ .do_interrupt = superh_cpu_do_interrupt,
+ .do_unaligned_access = superh_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+
static void superh_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = superh_cpu_class_by_name;
cc->has_work = superh_cpu_has_work;
- cc->tcg_ops.do_interrupt = superh_cpu_do_interrupt;
- cc->tcg_ops.cpu_exec_interrupt = superh_cpu_exec_interrupt;
cc->dump_state = superh_cpu_dump_state;
cc->set_pc = superh_cpu_set_pc;
- cc->tcg_ops.synchronize_from_tb = superh_cpu_synchronize_from_tb;
cc->gdb_read_register = superh_cpu_gdb_read_register;
cc->gdb_write_register = superh_cpu_gdb_write_register;
- cc->tcg_ops.tlb_fill = superh_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
- cc->tcg_ops.do_unaligned_access = superh_cpu_do_unaligned_access;
cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
#endif
cc->disas_set_info = superh_cpu_disas_set_info;
- cc->tcg_ops.initialize = sh4_translate_init;

cc->gdb_num_core_regs = 59;

dc->vmsd = &vmstate_sh_cpu;
+ cc->tcg_ops = &superh_tcg_ops;
}

#define DEFINE_SUPERH_CPU_TYPE(type_name, cinit, initfn) \
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {

static const TCGCPUOps superh_tcg_ops = {
.initialize = sh4_translate_init,
+ .translate_code = sh4_translate_code,
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
.restore_state_to_opc = superh_restore_state_to_opc,

diff --git a/target/sh4/translate.c b/target/sh4/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/translate.c
+++ b/target/sh4/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
.tb_stop = sh4_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sh4_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;

diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property sparc_cpu_properties[] = {
DEFINE_PROP_END_OF_LIST()
};

+#ifdef CONFIG_TCG
+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps sparc_tcg_ops = {
+ .initialize = sparc_tcg_init,
+ .synchronize_from_tb = sparc_cpu_synchronize_from_tb,
+ .cpu_exec_interrupt = sparc_cpu_exec_interrupt,
+ .tlb_fill = sparc_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+ .do_interrupt = sparc_cpu_do_interrupt,
+ .do_transaction_failed = sparc_cpu_do_transaction_failed,
+ .do_unaligned_access = sparc_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+#endif /* CONFIG_TCG */
+
static void sparc_cpu_class_init(ObjectClass *oc, void *data)
{
SPARCCPUClass *scc = SPARC_CPU_CLASS(oc);
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
cc->class_by_name = sparc_cpu_class_by_name;
cc->parse_features = sparc_cpu_parse_features;
cc->has_work = sparc_cpu_has_work;
- cc->tcg_ops.do_interrupt = sparc_cpu_do_interrupt;
- cc->tcg_ops.cpu_exec_interrupt = sparc_cpu_exec_interrupt;
cc->dump_state = sparc_cpu_dump_state;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
cc->memory_rw_debug = sparc_cpu_memory_rw_debug;
#endif
cc->set_pc = sparc_cpu_set_pc;
- cc->tcg_ops.synchronize_from_tb = sparc_cpu_synchronize_from_tb;
cc->gdb_read_register = sparc_cpu_gdb_read_register;
cc->gdb_write_register = sparc_cpu_gdb_write_register;
- cc->tcg_ops.tlb_fill = sparc_cpu_tlb_fill;
#ifndef CONFIG_USER_ONLY
- cc->tcg_ops.do_transaction_failed = sparc_cpu_do_transaction_failed;
- cc->tcg_ops.do_unaligned_access = sparc_cpu_do_unaligned_access;
cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
cc->vmsd = &vmstate_sparc_cpu;
#endif
cc->disas_set_info = cpu_sparc_disas_set_info;
- cc->tcg_ops.initialize = sparc_tcg_init;

#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
cc->gdb_num_core_regs = 86;
#else
cc->gdb_num_core_regs = 72;
#endif
+ cc->tcg_ops = &sparc_tcg_ops;
}

static const TypeInfo sparc_cpu_type_info = {
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {

static const TCGCPUOps sparc_tcg_ops = {
.initialize = sparc_tcg_init,
+ .translate_code = sparc_translate_code,
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
.restore_state_to_opc = sparc_restore_state_to_opc,

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
.tb_stop = sparc_tr_tb_stop,
};

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext dc = {};

diff --git a/target/tilegx/cpu.c b/target/tilegx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tilegx/cpu.c
+++ b/target/tilegx/cpu.c
@@ -XXX,XX +XXX,XX @@ static bool tilegx_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
return false;
}

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps tilegx_tcg_ops = {
+ .initialize = tilegx_tcg_init,
+ .cpu_exec_interrupt = tilegx_cpu_exec_interrupt,
+ .tlb_fill = tilegx_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+ .do_interrupt = tilegx_cpu_do_interrupt,
+#endif /* !CONFIG_USER_ONLY */
+};
+
static void tilegx_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -XXX,XX +XXX,XX @@ static void tilegx_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = tilegx_cpu_class_by_name;
cc->has_work = tilegx_cpu_has_work;
- cc->tcg_ops.do_interrupt = tilegx_cpu_do_interrupt;
- cc->tcg_ops.cpu_exec_interrupt = tilegx_cpu_exec_interrupt;
cc->dump_state = tilegx_cpu_dump_state;
cc->set_pc = tilegx_cpu_set_pc;
- cc->tcg_ops.tlb_fill = tilegx_cpu_tlb_fill;
cc->gdb_num_core_regs = 0;
- cc->tcg_ops.initialize = tilegx_tcg_init;
+ cc->tcg_ops = &tilegx_tcg_ops;
}

static const TypeInfo tilegx_cpu_type_info = {
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -XXX,XX +XXX,XX @@ static void tc27x_initfn(Object *obj)
set_feature(&cpu->env, TRICORE_FEATURE_161);
}

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps tricore_tcg_ops = {
+ .initialize = tricore_tcg_init,
+ .synchronize_from_tb = tricore_cpu_synchronize_from_tb,
+ .tlb_fill = tricore_cpu_tlb_fill,
+};
+
static void tricore_cpu_class_init(ObjectClass *c, void *data)
{
TriCoreCPUClass *mcc = TRICORE_CPU_CLASS(c);
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)

cc->dump_state = tricore_cpu_dump_state;
cc->set_pc = tricore_cpu_set_pc;
- cc->tcg_ops.synchronize_from_tb = tricore_cpu_synchronize_from_tb;
cc->get_phys_page_debug = tricore_cpu_get_phys_page_debug;
- cc->tcg_ops.initialize = tricore_tcg_init;
- cc->tcg_ops.tlb_fill = tricore_cpu_tlb_fill;
+ cc->tcg_ops = &tricore_tcg_ops;
}

#define DEFINE_TRICORE_CPU_TYPE(cpu_model, initfn) \
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {

static const TCGCPUOps tricore_tcg_ops = {
.initialize = tricore_tcg_init,
+ .translate_code = tricore_translate_code,
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
.restore_state_to_opc = tricore_restore_state_to_opc,
.tlb_fill = tricore_cpu_tlb_fill,
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/translate.c
+++ b/target/tricore/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
.tb_stop = tricore_tr_tb_stop,
};

-
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
- vaddr pc, void *host_pc)
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+ int *max_insns, vaddr pc, void *host_pc)
{
DisasContext ctx;
translator_loop(cs, tb, max_insns, pc, host_pc,
diff --git a/target/unicore32/cpu.c b/target/unicore32/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/unicore32/cpu.c
+++ b/target/unicore32/cpu.c
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_uc32_cpu = {
.unmigratable = 1,
};

+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps uc32_tcg_ops = {
+ .initialize = uc32_translate_init,
+ .cpu_exec_interrupt = uc32_cpu_exec_interrupt,
+ .tlb_fill = uc32_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+ .do_interrupt = uc32_cpu_do_interrupt,
+#endif /* !CONFIG_USER_ONLY */
+};
+
static void uc32_cpu_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
@@ -XXX,XX +XXX,XX @@ static void uc32_cpu_class_init(ObjectClass *oc, void *data)

cc->class_by_name = uc32_cpu_class_by_name;
cc->has_work = uc32_cpu_has_work;
- cc->tcg_ops.do_interrupt = uc32_cpu_do_interrupt;
- cc->tcg_ops.cpu_exec_interrupt = uc32_cpu_exec_interrupt;
cc->dump_state = uc32_cpu_dump_state;
cc->set_pc = uc32_cpu_set_pc;
- cc->tcg_ops.tlb_fill = uc32_cpu_tlb_fill;
cc->get_phys_page_debug = uc32_cpu_get_phys_page_debug;
- cc->tcg_ops.initialize = uc32_translate_init;
dc->vmsd = &vmstate_uc32_cpu;
+ cc->tcg_ops = &uc32_tcg_ops;
}

#define DEFINE_UNICORE32_CPU_TYPE(cpu_model, initfn) \
926
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
1815
index XXXXXXX..XXXXXXX 100644
927
index XXXXXXX..XXXXXXX 100644
1816
--- a/target/xtensa/cpu.c
928
--- a/target/xtensa/cpu.c
1817
+++ b/target/xtensa/cpu.c
929
+++ b/target/xtensa/cpu.c
1818
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_xtensa_cpu = {
930
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
1819
.unmigratable = 1,
931
1820
};
932
static const TCGCPUOps xtensa_tcg_ops = {
1821
933
.initialize = xtensa_translate_init,
1822
+#include "hw/core/tcg-cpu-ops.h"
934
+ .translate_code = xtensa_translate_code,
1823
+
935
.debug_excp_handler = xtensa_breakpoint_handler,
1824
+static struct TCGCPUOps xtensa_tcg_ops = {
936
.restore_state_to_opc = xtensa_restore_state_to_opc,
1825
+ .initialize = xtensa_translate_init,
937
1826
+ .cpu_exec_interrupt = xtensa_cpu_exec_interrupt,
938
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
1827
+ .tlb_fill = xtensa_cpu_tlb_fill,
939
index XXXXXXX..XXXXXXX 100644
1828
+ .debug_excp_handler = xtensa_breakpoint_handler,
940
--- a/target/xtensa/translate.c
1829
+
941
+++ b/target/xtensa/translate.c
1830
+#ifndef CONFIG_USER_ONLY
942
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
1831
+ .do_interrupt = xtensa_cpu_do_interrupt,
943
.tb_stop = xtensa_tr_tb_stop,
1832
+ .do_transaction_failed = xtensa_cpu_do_transaction_failed,
944
};
1833
+ .do_unaligned_access = xtensa_cpu_do_unaligned_access,
945
1834
+#endif /* !CONFIG_USER_ONLY */
946
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
1835
+};
947
- vaddr pc, void *host_pc)
1836
+
948
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
1837
static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
949
+ int *max_insns, vaddr pc, void *host_pc)
1838
{
950
{
1839
DeviceClass *dc = DEVICE_CLASS(oc);
951
DisasContext dc = {};
1840
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
952
translator_loop(cpu, tb, max_insns, pc, host_pc,
1841
1842
cc->class_by_name = xtensa_cpu_class_by_name;
1843
cc->has_work = xtensa_cpu_has_work;
1844
- cc->tcg_ops.do_interrupt = xtensa_cpu_do_interrupt;
1845
- cc->tcg_ops.cpu_exec_interrupt = xtensa_cpu_exec_interrupt;
1846
cc->dump_state = xtensa_cpu_dump_state;
1847
cc->set_pc = xtensa_cpu_set_pc;
1848
cc->gdb_read_register = xtensa_cpu_gdb_read_register;
1849
cc->gdb_write_register = xtensa_cpu_gdb_write_register;
1850
cc->gdb_stop_before_watchpoint = true;
1851
- cc->tcg_ops.tlb_fill = xtensa_cpu_tlb_fill;
1852
#ifndef CONFIG_USER_ONLY
1853
- cc->tcg_ops.do_unaligned_access = xtensa_cpu_do_unaligned_access;
1854
cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
1855
- cc->tcg_ops.do_transaction_failed = xtensa_cpu_do_transaction_failed;
1856
#endif
1857
- cc->tcg_ops.debug_excp_handler = xtensa_breakpoint_handler;
1858
cc->disas_set_info = xtensa_cpu_disas_set_info;
1859
- cc->tcg_ops.initialize = xtensa_translate_init;
1860
dc->vmsd = &vmstate_xtensa_cpu;
1861
+ cc->tcg_ops = &xtensa_tcg_ops;
1862
}
1863
1864
static const TypeInfo xtensa_cpu_type_info = {
1865
diff --git a/target/ppc/translate_init.c.inc b/target/ppc/translate_init.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/translate_init.c.inc
+++ b/target/ppc/translate_init.c.inc
@@ -XXX,XX +XXX,XX @@ static Property ppc_cpu_properties[] = {
     DEFINE_PROP_END_OF_LIST(),
 };
 
+#ifdef CONFIG_TCG
+#include "hw/core/tcg-cpu-ops.h"
+
+static struct TCGCPUOps ppc_tcg_ops = {
+    .initialize = ppc_translate_init,
+    .cpu_exec_interrupt = ppc_cpu_exec_interrupt,
+    .tlb_fill = ppc_cpu_tlb_fill,
+
+#ifndef CONFIG_USER_ONLY
+    .do_interrupt = ppc_cpu_do_interrupt,
+    .cpu_exec_enter = ppc_cpu_exec_enter,
+    .cpu_exec_exit = ppc_cpu_exec_exit,
+    .do_unaligned_access = ppc_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+#endif /* CONFIG_TCG */
+
 static void ppc_cpu_class_init(ObjectClass *oc, void *data)
 {
     PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
 #ifndef CONFIG_USER_ONLY
     cc->virtio_is_big_endian = ppc_cpu_is_big_endian;
 #endif
-#ifdef CONFIG_TCG
-    cc->tcg_ops.initialize = ppc_translate_init;
-    cc->tcg_ops.cpu_exec_interrupt = ppc_cpu_exec_interrupt;
-    cc->tcg_ops.do_interrupt = ppc_cpu_do_interrupt;
-    cc->tcg_ops.tlb_fill = ppc_cpu_tlb_fill;
-#ifndef CONFIG_USER_ONLY
-    cc->tcg_ops.cpu_exec_enter = ppc_cpu_exec_enter;
-    cc->tcg_ops.cpu_exec_exit = ppc_cpu_exec_exit;
-    cc->tcg_ops.do_unaligned_access = ppc_cpu_do_unaligned_access;
-#endif /* !CONFIG_USER_ONLY */
-#endif /* CONFIG_TCG */
-
     cc->disas_set_info = ppc_disas_set_info;
 
     dc->fw_name = "PowerPC,UNKNOWN";
+
+#ifdef CONFIG_TCG
+    cc->tcg_ops = &ppc_tcg_ops;
+#endif /* CONFIG_TCG */
 }
 
 static const TypeInfo ppc_cpu_type_info = {
diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ F: include/exec/helper*.h
 F: include/exec/tb-hash.h
 F: include/sysemu/cpus.h
 F: include/sysemu/tcg.h
+F: include/hw/core/tcg-cpu-ops.h
 
 FPU emulation
 M: Aurelien Jarno <aurelien@aurel32.net>
--
2.25.1

-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void tricore_translate_code(CPUState *cs, TranslationBlock *tb,
+                            int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext ctx;
     translator_loop(cs, tb, max_insns, pc, host_pc,
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
 
 static const TCGCPUOps xtensa_tcg_ops = {
     .initialize = xtensa_translate_init,
+    .translate_code = xtensa_translate_code,
     .debug_excp_handler = xtensa_breakpoint_handler,
     .restore_state_to_opc = xtensa_restore_state_to_opc,
 
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/translate.c
+++ b/target/xtensa/translate.c
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
     .tb_stop = xtensa_tr_tb_stop,
 };
 
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                           vaddr pc, void *host_pc)
+void xtensa_translate_code(CPUState *cpu, TranslationBlock *tb,
+                           int *max_insns, vaddr pc, void *host_pc)
 {
     DisasContext dc = {};
     translator_loop(cpu, tb, max_insns, pc, host_pc,
--
2.43.0