1
TCG patch queue, plus one target/sh4 patch that
1
The following changes since commit b52daaf2c868f2bab102eb5acbf55b2917f46aea:
2
Yoshinori Sato asked me to process.
2
3
3
Merge tag 'pull-block-2023-06-05' of https://gitlab.com/hreitz/qemu into staging (2023-06-05 10:27:31 -0700)
4
5
r~
6
7
8
The following changes since commit efbf38d73e5dcc4d5f8b98c6e7a12be1f3b91745:
9
10
Merge tag 'for-upstream' of git://repo.or.cz/qemu/kevin into staging (2022-10-03 15:06:07 -0400)
11
4
12
are available in the Git repository at:
5
are available in the Git repository at:
13
6
14
https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20221004
7
https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230605
15
8
16
for you to fetch changes up to ab419fd8a035a65942de4e63effcd55ccbf1a9fe:
9
for you to fetch changes up to a7f6911c127b1dd1b8764e03b0ebcf0a227a15e4:
17
10
18
target/sh4: Fix TB_FLAG_UNALIGN (2022-10-04 12:33:05 -0700)
11
tcg/tcg-op-vec: Remove left over _link_error() definitions (2023-06-05 12:20:16 -0700)
19
12
20
----------------------------------------------------------------
13
----------------------------------------------------------------
21
Cache CPUClass for use in hot code paths.
14
Build tcg/ once for system and once for user.
22
Add CPUTLBEntryFull, probe_access_full, tlb_set_page_full.
15
Unmap perf_marker.
23
Add generic support for TARGET_TB_PCREL.
16
Remove left over _link_error() definitions.
24
tcg/ppc: Optimize 26-bit jumps using STQ for POWER 2.07
25
target/sh4: Fix TB_FLAG_UNALIGN
26
17
27
----------------------------------------------------------------
18
----------------------------------------------------------------
28
Alex Bennée (3):
19
Ilya Leoshkevich (1):
29
cpu: cache CPUClass in CPUState for hot code paths
20
accel/tcg: Unmap perf_marker
30
hw/core/cpu-sysemu: used cached class in cpu_asidx_from_attrs
21
31
cputlb: used cached CPUClass in our hot-paths
22
Philippe Mathieu-Daudé (2):
32
23
target/ppc: Inline gen_icount_io_start()
33
Leandro Lupori (1):
24
tcg/tcg-op-vec: Remove left over _link_error() definitions
34
tcg/ppc: Optimize 26-bit jumps
25
35
26
Richard Henderson (49):
36
Richard Henderson (16):
27
tcg/ppc: Remove TARGET_LONG_BITS, TCG_TYPE_TL
37
accel/tcg: Rename CPUIOTLBEntry to CPUTLBEntryFull
28
tcg/riscv: Remove TARGET_LONG_BITS, TCG_TYPE_TL
38
accel/tcg: Drop addr member from SavedIOTLB
29
tcg/s390x: Remove TARGET_LONG_BITS, TCG_TYPE_TL
39
accel/tcg: Suppress auto-invalidate in probe_access_internal
30
tcg/sparc64: Remove TARGET_LONG_BITS, TCG_TYPE_TL
40
accel/tcg: Introduce probe_access_full
31
tcg: Move TCG_TYPE_TL from tcg.h to tcg-op.h
41
accel/tcg: Introduce tlb_set_page_full
32
tcg: Widen CPUTLBEntry comparators to 64-bits
42
include/exec: Introduce TARGET_PAGE_ENTRY_EXTRA
33
tcg: Add tlb_fast_offset to TCGContext
43
accel/tcg: Remove PageDesc code_bitmap
34
target/avr: Add missing includes of qemu/error-report.h
44
accel/tcg: Use bool for page_find_alloc
35
target/*: Add missing includes of tcg/debug-assert.h
45
accel/tcg: Use DisasContextBase in plugin_gen_tb_start
36
*: Add missing includes of tcg/tcg.h
46
accel/tcg: Do not align tb->page_addr[0]
37
tcg: Split out tcg-target-reg-bits.h
47
accel/tcg: Inline tb_flush_jmp_cache
38
target/arm: Fix test of TCG_OVERSIZED_GUEST
48
include/hw/core: Create struct CPUJumpCache
39
tcg: Split out tcg/oversized-guest.h
49
hw/core: Add CPUClass.get_pc
40
tcg: Move TCGv, dup_const_tl definitions to tcg-op.h
50
accel/tcg: Introduce tb_pc and log_pc
41
tcg: Split tcg/tcg-op-common.h from tcg/tcg-op.h
51
accel/tcg: Introduce TARGET_TB_PCREL
42
target/arm: Include helper-gen.h in translator.h
52
target/sh4: Fix TB_FLAG_UNALIGN
43
target/hexagon: Include helper-gen.h where needed
53
44
tcg: Remove outdated comments in helper-head.h
54
accel/tcg/internal.h | 10 ++
45
tcg: Move TCGHelperInfo and dependencies to tcg/helper-info.h
55
accel/tcg/tb-hash.h | 1 +
46
tcg: Pass TCGHelperInfo to tcg_gen_callN
56
accel/tcg/tb-jmp-cache.h | 65 ++++++++
47
tcg: Move temp_idx and tcgv_i32_temp debug out of line
57
include/exec/cpu-common.h | 1 +
48
tcg: Split tcg_gen_callN
58
include/exec/cpu-defs.h | 48 ++++--
49
tcg: Split helper-gen.h
59
include/exec/exec-all.h | 75 ++++++++-
50
tcg: Split helper-proto.h
60
include/exec/plugin-gen.h | 7 +-
51
target/sh4: Emit insn_start for each insn in gUSA region
61
include/hw/core/cpu.h | 28 ++--
52
tcg: Add insn_start_words to TCGContext
62
include/qemu/typedefs.h | 2 +
53
tcg: Add guest_mo to TCGContext
63
include/tcg/tcg.h | 2 +-
54
tcg: Move TLB_FLAGS_MASK check out of get_alignment_bits
64
target/sh4/cpu.h | 56 ++++---
55
tcg: Split tcg/tcg-op-gvec.h
65
accel/stubs/tcg-stub.c | 4 +
56
tcg: Remove NO_CPU_IO_DEFS
66
accel/tcg/cpu-exec.c | 80 +++++-----
57
exec-all: Widen tb_page_addr_t for user-only
67
accel/tcg/cputlb.c | 259 ++++++++++++++++++--------------
58
exec-all: Widen TranslationBlock pc and cs_base to 64-bits
68
accel/tcg/plugin-gen.c | 22 +--
59
tcg: Spit out exec/translation-block.h
69
accel/tcg/translate-all.c | 214 ++++++++++++--------------
60
include/exec: Remove CODE_GEN_AVG_BLOCK_SIZE
70
accel/tcg/translator.c | 2 +-
61
accel/tcg: Move most of gen-icount.h into translator.c
71
cpu.c | 9 +-
62
accel/tcg: Introduce translator_io_start
72
hw/core/cpu-common.c | 3 +-
63
accel/tcg: Move translator_fake_ldb out of line
73
hw/core/cpu-sysemu.c | 5 +-
64
target/arm: Tidy helpers for translation
74
linux-user/sh4/signal.c | 6 +-
65
target/mips: Tidy helpers for translation
75
plugins/core.c | 2 +-
66
target/*: Add missing includes of exec/translation-block.h
76
target/alpha/cpu.c | 9 ++
67
target/arm: Add missing include of exec/exec-all.h
77
target/arm/cpu.c | 17 ++-
68
accel/tcg: Tidy includes for translator.[ch]
78
target/arm/mte_helper.c | 14 +-
69
tcg: Fix PAGE/PROT confusion
79
target/arm/sve_helper.c | 4 +-
70
tcg: Move env defines out of NEED_CPU_H in helper-head.h
80
target/arm/translate-a64.c | 2 +-
71
tcg: Remove target-specific headers from tcg.[ch]
81
target/avr/cpu.c | 10 +-
72
plugins: Move plugin_insn_append to translator.c
82
target/cris/cpu.c | 8 +
73
plugins: Drop unused headers from exec/plugin-gen.h
83
target/hexagon/cpu.c | 10 +-
74
exec/poison: Do not poison CONFIG_SOFTMMU
84
target/hppa/cpu.c | 12 +-
75
tcg: Build once for system and once for user-only
85
target/i386/cpu.c | 9 ++
76
86
target/i386/tcg/tcg-cpu.c | 2 +-
77
MAINTAINERS | 3 +-
87
target/loongarch/cpu.c | 11 +-
78
include/exec/cpu-all.h | 3 +
88
target/m68k/cpu.c | 8 +
79
include/exec/cpu-defs.h | 50 +-
89
target/microblaze/cpu.c | 10 +-
80
include/exec/cpu_ldst.h | 22 +-
90
target/mips/cpu.c | 8 +
81
include/exec/exec-all.h | 142 +--
91
target/mips/tcg/exception.c | 2 +-
82
include/exec/gen-icount.h | 83 --
92
target/mips/tcg/sysemu/special_helper.c | 2 +-
83
include/exec/helper-gen-common.h | 18 +
93
target/nios2/cpu.c | 9 ++
84
include/exec/helper-gen.h | 97 +-
94
target/openrisc/cpu.c | 10 +-
85
include/exec/helper-head.h | 24 +-
95
target/ppc/cpu_init.c | 8 +
86
include/exec/helper-proto-common.h | 18 +
96
target/riscv/cpu.c | 17 ++-
87
include/exec/helper-proto.h | 73 +-
97
target/rx/cpu.c | 10 +-
88
include/exec/helper-tcg.h | 75 --
98
target/s390x/cpu.c | 8 +
89
include/exec/plugin-gen.h | 24 -
99
target/s390x/tcg/mem_helper.c | 4 -
90
include/exec/poison.h | 1 -
100
target/sh4/cpu.c | 18 ++-
91
include/exec/tlb-common.h | 56 ++
101
target/sh4/helper.c | 6 +-
92
include/exec/translation-block.h | 149 +++
102
target/sh4/translate.c | 90 +++++------
93
include/exec/translator.h | 24 +-
103
target/sparc/cpu.c | 10 +-
94
include/qemu/typedefs.h | 1 +
104
target/tricore/cpu.c | 11 +-
95
include/tcg/helper-info.h | 64 ++
105
target/xtensa/cpu.c | 8 +
96
include/tcg/insn-start-words.h | 17 +
106
tcg/tcg.c | 8 +-
97
include/tcg/oversized-guest.h | 23 +
107
trace/control-target.c | 2 +-
98
include/tcg/tcg-op-common.h | 996 +++++++++++++++++++
108
tcg/ppc/tcg-target.c.inc | 119 +++++++++++----
99
include/tcg/tcg-op-gvec-common.h | 426 ++++++++
109
55 files changed, 915 insertions(+), 462 deletions(-)
100
include/tcg/tcg-op-gvec.h | 444 +--------
110
create mode 100644 accel/tcg/tb-jmp-cache.h
101
include/tcg/tcg-op.h | 1033 +-------------------
111
102
include/tcg/tcg-opc.h | 6 +-
103
include/tcg/tcg.h | 107 +-
104
target/arm/cpregs.h | 4 +-
105
target/arm/tcg/translate.h | 5 +
106
target/mips/tcg/translate.h | 5 +-
107
target/ppc/cpu.h | 2 -
108
target/sparc/cpu.h | 2 -
109
tcg/aarch64/tcg-target-reg-bits.h | 12 +
110
tcg/arm/tcg-target-reg-bits.h | 12 +
111
tcg/i386/tcg-target-reg-bits.h | 16 +
112
tcg/i386/tcg-target.h | 2 -
113
tcg/loongarch64/tcg-target-reg-bits.h | 21 +
114
tcg/loongarch64/tcg-target.h | 11 -
115
tcg/mips/tcg-target-reg-bits.h | 18 +
116
tcg/mips/tcg-target.h | 8 -
117
tcg/ppc/tcg-target-reg-bits.h | 16 +
118
tcg/ppc/tcg-target.h | 5 -
119
tcg/riscv/tcg-target-reg-bits.h | 19 +
120
tcg/riscv/tcg-target.h | 9 -
121
tcg/s390x/tcg-target-reg-bits.h | 17 +
122
tcg/sparc64/tcg-target-reg-bits.h | 12 +
123
tcg/tcg-internal.h | 47 +-
124
tcg/tci/tcg-target-reg-bits.h | 18 +
125
tcg/tci/tcg-target.h | 8 -
126
include/exec/helper-gen.h.inc | 102 ++
127
include/exec/helper-proto.h.inc | 68 ++
128
accel/tcg/cpu-exec.c | 2 +-
129
accel/tcg/cputlb.c | 12 +-
130
accel/tcg/monitor.c | 1 +
131
accel/tcg/perf.c | 19 +-
132
accel/tcg/plugin-gen.c | 6 +
133
accel/tcg/tcg-accel-ops-mttcg.c | 2 +-
134
accel/tcg/tcg-accel-ops-rr.c | 2 +-
135
accel/tcg/tcg-all.c | 1 +
136
accel/tcg/tcg-runtime-gvec.c | 2 +-
137
accel/tcg/tcg-runtime.c | 6 +-
138
accel/tcg/translate-all.c | 30 +-
139
accel/tcg/translator.c | 140 ++-
140
target/alpha/translate.c | 18 +-
141
target/arm/ptw.c | 8 +-
142
target/arm/tcg/translate-a64.c | 42 +-
143
target/arm/tcg/translate-m-nocp.c | 2 -
144
target/arm/tcg/translate-mve.c | 4 -
145
target/arm/tcg/translate-neon.c | 4 -
146
target/arm/tcg/translate-sme.c | 7 -
147
target/arm/tcg/translate-sve.c | 11 -
148
target/arm/tcg/translate-vfp.c | 7 +-
149
target/arm/tcg/translate.c | 41 +-
150
target/avr/cpu.c | 1 +
151
target/avr/helper.c | 1 +
152
target/avr/translate.c | 6 +-
153
target/cris/translate.c | 8 +-
154
target/hexagon/genptr.c | 1 +
155
target/hexagon/translate.c | 7 +
156
target/hppa/translate.c | 10 +-
157
target/i386/helper.c | 3 +
158
target/i386/tcg/translate.c | 57 +-
159
target/loongarch/translate.c | 7 +-
160
target/m68k/translate.c | 5 +-
161
target/microblaze/translate.c | 6 +-
162
target/mips/tcg/msa_translate.c | 3 -
163
target/mips/tcg/mxu_translate.c | 2 -
164
target/mips/tcg/octeon_translate.c | 4 +-
165
target/mips/tcg/rel6_translate.c | 2 -
166
target/mips/tcg/translate.c | 53 +-
167
target/mips/tcg/translate_addr_const.c | 1 -
168
target/mips/tcg/tx79_translate.c | 4 +-
169
target/mips/tcg/vr54xx_translate.c | 3 -
170
target/nios2/translate.c | 6 +-
171
target/openrisc/sys_helper.c | 1 +
172
target/openrisc/translate.c | 14 +-
173
target/ppc/translate.c | 78 +-
174
target/riscv/cpu_helper.c | 1 +
175
target/riscv/translate.c | 6 +-
176
target/rx/cpu.c | 1 +
177
target/rx/op_helper.c | 1 +
178
target/rx/translate.c | 7 +-
179
target/s390x/tcg/translate.c | 10 +-
180
target/sh4/translate.c | 21 +-
181
target/sparc/translate.c | 78 +-
182
target/tricore/cpu.c | 1 +
183
target/tricore/translate.c | 7 +-
184
target/xtensa/translate.c | 31 +-
185
tcg/optimize.c | 2 +-
186
tcg/region.c | 20 +-
187
tcg/tcg-op-gvec.c | 4 +-
188
tcg/tcg-op-ldst.c | 26 +-
189
tcg/tcg-op-vec.c | 13 +-
190
tcg/tcg-op.c | 4 +-
191
tcg/tcg.c | 218 +++--
192
tcg/tci.c | 3 +-
193
include/exec/helper-info.c.inc | 96 ++
194
target/loongarch/insn_trans/trans_extra.c.inc | 4 +-
195
target/loongarch/insn_trans/trans_privileged.c.inc | 4 +-
196
target/ppc/power8-pmu-regs.c.inc | 10 +-
197
target/ppc/translate/branch-impl.c.inc | 2 +-
198
target/riscv/insn_trans/trans_privileged.c.inc | 8 +-
199
target/riscv/insn_trans/trans_rvi.c.inc | 24 +-
200
tcg/aarch64/tcg-target.c.inc | 8 +-
201
tcg/arm/tcg-target.c.inc | 8 +-
202
tcg/i386/tcg-target.c.inc | 9 +-
203
tcg/loongarch64/tcg-target.c.inc | 8 +-
204
tcg/mips/tcg-target.c.inc | 20 +-
205
tcg/ppc/tcg-target.c.inc | 46 +-
206
tcg/riscv/tcg-target.c.inc | 21 +-
207
tcg/s390x/tcg-target.c.inc | 22 +-
208
tcg/sparc64/tcg-target.c.inc | 20 +-
209
scripts/make-config-poison.sh | 5 +-
210
target/hexagon/idef-parser/idef-parser.y | 3 +-
211
tcg/meson.build | 30 +-
212
135 files changed, 3088 insertions(+), 2782 deletions(-)
213
delete mode 100644 include/exec/gen-icount.h
214
create mode 100644 include/exec/helper-gen-common.h
215
create mode 100644 include/exec/helper-proto-common.h
216
delete mode 100644 include/exec/helper-tcg.h
217
create mode 100644 include/exec/tlb-common.h
218
create mode 100644 include/exec/translation-block.h
219
create mode 100644 include/tcg/helper-info.h
220
create mode 100644 include/tcg/insn-start-words.h
221
create mode 100644 include/tcg/oversized-guest.h
222
create mode 100644 include/tcg/tcg-op-common.h
223
create mode 100644 include/tcg/tcg-op-gvec-common.h
224
create mode 100644 tcg/aarch64/tcg-target-reg-bits.h
225
create mode 100644 tcg/arm/tcg-target-reg-bits.h
226
create mode 100644 tcg/i386/tcg-target-reg-bits.h
227
create mode 100644 tcg/loongarch64/tcg-target-reg-bits.h
228
create mode 100644 tcg/mips/tcg-target-reg-bits.h
229
create mode 100644 tcg/ppc/tcg-target-reg-bits.h
230
create mode 100644 tcg/riscv/tcg-target-reg-bits.h
231
create mode 100644 tcg/s390x/tcg-target-reg-bits.h
232
create mode 100644 tcg/sparc64/tcg-target-reg-bits.h
233
create mode 100644 tcg/tci/tcg-target-reg-bits.h
234
create mode 100644 include/exec/helper-gen.h.inc
235
create mode 100644 include/exec/helper-proto.h.inc
236
create mode 100644 include/exec/helper-info.c.inc
237
diff view generated by jsdifflib
1
From: Leandro Lupori <leandro.lupori@eldorado.org.br>
1
All uses replaced with TCGContext.addr_type.
2
2
3
PowerPC64 processors handle direct branches better than indirect
3
Reviewed-by: Anton Johansson <anjo@rev.ng>
4
ones, resulting in less stalled cycles and branch misses.
5
6
However, PPC's tb_target_set_jmp_target() was only using direct
7
branches for 16-bit jumps, while PowerPC64's unconditional branch
8
instructions are able to handle displacements of up to 26 bits.
9
To take advantage of this, now jumps whose displacements fit in
10
between 17 and 26 bits are also converted to direct branches.
11
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Signed-off-by: Leandro Lupori <leandro.lupori@eldorado.org.br>
14
[rth: Expanded some commentary.]
15
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
16
---
5
---
17
tcg/ppc/tcg-target.c.inc | 119 +++++++++++++++++++++++++++++----------
6
tcg/ppc/tcg-target.c.inc | 21 +++++++++++----------
18
1 file changed, 88 insertions(+), 31 deletions(-)
7
1 file changed, 11 insertions(+), 10 deletions(-)
19
8
20
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
9
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
21
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/ppc/tcg-target.c.inc
11
--- a/tcg/ppc/tcg-target.c.inc
23
+++ b/tcg/ppc/tcg-target.c.inc
12
+++ b/tcg/ppc/tcg-target.c.inc
24
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
13
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
25
tcg_out32(s, insn);
14
TCGReg addrlo, TCGReg addrhi,
26
}
15
MemOpIdx oi, bool is_ld)
27
28
+static inline uint64_t make_pair(tcg_insn_unit i1, tcg_insn_unit i2)
29
+{
30
+ if (HOST_BIG_ENDIAN) {
31
+ return (uint64_t)i1 << 32 | i2;
32
+ }
33
+ return (uint64_t)i2 << 32 | i1;
34
+}
35
+
36
+static inline void ppc64_replace2(uintptr_t rx, uintptr_t rw,
37
+ tcg_insn_unit i0, tcg_insn_unit i1)
38
+{
39
+#if TCG_TARGET_REG_BITS == 64
40
+ qatomic_set((uint64_t *)rw, make_pair(i0, i1));
41
+ flush_idcache_range(rx, rw, 8);
42
+#else
43
+ qemu_build_not_reached();
44
+#endif
45
+}
46
+
47
+static inline void ppc64_replace4(uintptr_t rx, uintptr_t rw,
48
+ tcg_insn_unit i0, tcg_insn_unit i1,
49
+ tcg_insn_unit i2, tcg_insn_unit i3)
50
+{
51
+ uint64_t p[2];
52
+
53
+ p[!HOST_BIG_ENDIAN] = make_pair(i0, i1);
54
+ p[HOST_BIG_ENDIAN] = make_pair(i2, i3);
55
+
56
+ /*
57
+ * There's no convenient way to get the compiler to allocate a pair
58
+ * of registers at an even index, so copy into r6/r7 and clobber.
59
+ */
60
+ asm("mr %%r6, %1\n\t"
61
+ "mr %%r7, %2\n\t"
62
+ "stq %%r6, %0"
63
+ : "=Q"(*(__int128 *)rw) : "r"(p[0]), "r"(p[1]) : "r6", "r7");
64
+ flush_idcache_range(rx, rw, 16);
65
+}
66
+
67
void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
68
uintptr_t jmp_rw, uintptr_t addr)
69
{
16
{
70
- if (TCG_TARGET_REG_BITS == 64) {
17
+ TCGType addr_type = s->addr_type;
71
- tcg_insn_unit i1, i2;
18
TCGLabelQemuLdst *ldst = NULL;
72
- intptr_t tb_diff = addr - tc_ptr;
19
MemOp opc = get_memop(oi);
73
- intptr_t br_diff = addr - (jmp_rx + 4);
20
MemOp a_bits, s_bits;
74
- uint64_t pair;
21
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
75
+ tcg_insn_unit i0, i1, i2, i3;
22
tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
76
+ intptr_t tb_diff = addr - tc_ptr;
23
77
+ intptr_t br_diff = addr - (jmp_rx + 4);
24
/* Load the (low part) TLB comparator into TMP2. */
78
+ intptr_t lo, hi;
25
- if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
79
26
- uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
80
- /* This does not exercise the range of the branch, but we do
27
+ if (cmp_off == 0
81
- still need to be able to load the new value of TCG_REG_TB.
28
+ && (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32)) {
82
- But this does still happen quite often. */
29
+ uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32
83
- if (tb_diff == (int16_t)tb_diff) {
30
? LWZUX : LDUX);
84
- i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
31
tcg_out32(s, lxu | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
85
- i2 = B | (br_diff & 0x3fffffc);
32
} else {
86
- } else {
33
tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
87
- intptr_t lo = (int16_t)tb_diff;
34
- if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
88
- intptr_t hi = (int32_t)(tb_diff - lo);
35
+ if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
89
- assert(tb_diff == hi + lo);
36
tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2,
90
- i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
37
TCG_REG_TMP1, cmp_off + 4 * HOST_BIG_ENDIAN);
91
- i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
38
} else {
92
- }
39
- tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off);
93
-#if HOST_BIG_ENDIAN
40
+ tcg_out_ld(s, addr_type, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off);
94
- pair = (uint64_t)i1 << 32 | i2;
41
}
95
-#else
96
- pair = (uint64_t)i2 << 32 | i1;
97
-#endif
98
-
99
- /* As per the enclosing if, this is ppc64. Avoid the _Static_assert
100
- within qatomic_set that would fail to build a ppc32 host. */
101
- qatomic_set__nocheck((uint64_t *)jmp_rw, pair);
102
- flush_idcache_range(jmp_rx, jmp_rw, 8);
103
- } else {
104
+ if (TCG_TARGET_REG_BITS == 32) {
105
intptr_t diff = addr - jmp_rx;
106
tcg_debug_assert(in_range_b(diff));
107
qatomic_set((uint32_t *)jmp_rw, B | (diff & 0x3fffffc));
108
flush_idcache_range(jmp_rx, jmp_rw, 4);
109
+ return;
110
}
42
}
111
+
43
112
+ /*
44
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
113
+ * For 16-bit displacements, we can use a single add + branch.
45
* Load the TLB addend for use on the fast path.
114
+ * This happens quite often.
46
* Do this asap to minimize any load use delay.
115
+ */
47
*/
116
+ if (tb_diff == (int16_t)tb_diff) {
48
- if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
117
+ i0 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
49
+ if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
118
+ i1 = B | (br_diff & 0x3fffffc);
50
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
119
+ ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
51
offsetof(CPUTLBEntry, addend));
120
+ return;
52
}
121
+ }
53
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
122
+
54
}
123
+ lo = (int16_t)tb_diff;
55
124
+ hi = (int32_t)(tb_diff - lo);
56
/* Mask the address for the requested alignment. */
125
+ assert(tb_diff == hi + lo);
57
- if (TARGET_LONG_BITS == 32) {
126
+ i0 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
58
+ if (addr_type == TCG_TYPE_I32) {
127
+ i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
59
tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
128
+
60
(32 - a_bits) & 31, 31 - s->page_bits);
129
+ /*
61
} else if (a_bits == 0) {
130
+ * Without stq from 2.07, we can only update two insns,
62
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
131
+ * and those must be the ones that load the target address.
63
}
132
+ */
64
}
133
+ if (!have_isa_2_07) {
65
134
+ ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
66
- if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
135
+ return;
67
+ if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
136
+ }
68
/* Low part comparison into cr7. */
137
+
69
tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
138
+ /*
70
0, 7, TCG_TYPE_I32);
139
+ * For 26-bit displacements, we can use a direct branch.
71
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
140
+ * Otherwise we still need the indirect branch, which we
72
tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
141
+ * must restore after a potential direct branch write.
73
} else {
142
+ */
74
/* Full comparison into cr7. */
143
+ br_diff -= 4;
75
- tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
144
+ if (in_range_b(br_diff)) {
76
- 0, 7, TCG_TYPE_TL);
145
+ i2 = B | (br_diff & 0x3fffffc);
77
+ tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, 0, 7, addr_type);
146
+ i3 = NOP;
78
}
147
+ } else {
79
148
+ i2 = MTSPR | RS(TCG_REG_TB) | CTR;
80
/* Load a pointer into the current opcode w/conditional branch-link. */
149
+ i3 = BCCTR | BO_ALWAYS;
81
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
150
+ }
82
h->base = guest_base ? TCG_GUEST_BASE_REG : 0;
151
+ ppc64_replace4(jmp_rx, jmp_rw, i0, i1, i2, i3);
83
#endif
152
}
84
153
85
- if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
154
static void tcg_out_call_int(TCGContext *s, int lk,
86
+ if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
155
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
87
/* Zero-extend the guest address for use in the host address. */
156
if (s->tb_jmp_insn_offset) {
88
tcg_out_ext32u(s, TCG_REG_R0, addrlo);
157
/* Direct jump. */
89
h->index = TCG_REG_R0;
158
if (TCG_TARGET_REG_BITS == 64) {
159
- /* Ensure the next insns are 8-byte aligned. */
160
- if ((uintptr_t)s->code_ptr & 7) {
161
+ /* Ensure the next insns are 8 or 16-byte aligned. */
162
+ while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
163
tcg_out32(s, NOP);
164
}
165
s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
166
--
90
--
167
2.34.1
91
2.34.1
diff view generated by jsdifflib
New patch
1
All uses replaced with TCGContext.addr_type.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/riscv/tcg-target.c.inc | 13 +++++++------
7
1 file changed, 7 insertions(+), 6 deletions(-)
8
9
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/riscv/tcg-target.c.inc
12
+++ b/tcg/riscv/tcg-target.c.inc
13
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
14
TCGReg addr_reg, MemOpIdx oi,
15
bool is_ld)
16
{
17
+ TCGType addr_type = s->addr_type;
18
TCGLabelQemuLdst *ldst = NULL;
19
MemOp opc = get_memop(oi);
20
TCGAtomAlign aa;
21
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
22
addr_adj = addr_reg;
23
if (a_mask < s_mask) {
24
addr_adj = TCG_REG_TMP0;
25
- tcg_out_opc_imm(s, TARGET_LONG_BITS == 32 ? OPC_ADDIW : OPC_ADDI,
26
+ tcg_out_opc_imm(s, addr_type == TCG_TYPE_I32 ? OPC_ADDIW : OPC_ADDI,
27
addr_adj, addr_reg, s_mask - a_mask);
28
}
29
compare_mask = s->page_mask | a_mask;
30
if (compare_mask == sextreg(compare_mask, 0, 12)) {
31
tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_adj, compare_mask);
32
} else {
33
- tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
34
+ tcg_out_movi(s, addr_type, TCG_REG_TMP1, compare_mask);
35
tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_adj);
36
}
37
38
/* Load the tlb comparator and the addend. */
39
- tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
40
+ tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
41
is_ld ? offsetof(CPUTLBEntry, addr_read)
42
: offsetof(CPUTLBEntry, addr_write));
43
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
44
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
45
tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
46
47
/* TLB Hit - translate address using addend. */
48
- if (TARGET_LONG_BITS == 64) {
49
+ if (addr_type != TCG_TYPE_I32) {
50
tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
51
} else if (have_zba) {
52
tcg_out_opc_reg(s, OPC_ADD_UW, TCG_REG_TMP0, addr_reg, TCG_REG_TMP2);
53
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
54
55
if (guest_base != 0) {
56
base = TCG_REG_TMP0;
57
- if (TARGET_LONG_BITS == 64) {
58
+ if (addr_type != TCG_TYPE_I32) {
59
tcg_out_opc_reg(s, OPC_ADD, base, addr_reg, TCG_GUEST_BASE_REG);
60
} else if (have_zba) {
61
tcg_out_opc_reg(s, OPC_ADD_UW, base, addr_reg, TCG_GUEST_BASE_REG);
62
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
63
tcg_out_ext32u(s, base, addr_reg);
64
tcg_out_opc_reg(s, OPC_ADD, base, base, TCG_GUEST_BASE_REG);
65
}
66
- } else if (TARGET_LONG_BITS == 64) {
67
+ } else if (addr_type != TCG_TYPE_I32) {
68
base = addr_reg;
69
} else {
70
base = TCG_REG_TMP0;
71
--
72
2.34.1
73
74
diff view generated by jsdifflib
New patch
1
All uses replaced with TCGContext.addr_type.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/s390x/tcg-target.c.inc | 9 +++++----
7
1 file changed, 5 insertions(+), 4 deletions(-)
8
9
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/s390x/tcg-target.c.inc
12
+++ b/tcg/s390x/tcg-target.c.inc
13
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
14
TCGReg addr_reg, MemOpIdx oi,
15
bool is_ld)
16
{
17
+ TCGType addr_type = s->addr_type;
18
TCGLabelQemuLdst *ldst = NULL;
19
MemOp opc = get_memop(oi);
20
MemOp s_bits = opc & MO_SIZE;
21
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
22
tgen_andi_risbg(s, TCG_REG_R0, addr_reg, tlb_mask);
23
} else {
24
tcg_out_insn(s, RX, LA, TCG_REG_R0, addr_reg, TCG_REG_NONE, a_off);
25
- tgen_andi(s, TCG_TYPE_TL, TCG_REG_R0, tlb_mask);
26
+ tgen_andi(s, addr_type, TCG_REG_R0, tlb_mask);
27
}
28
29
if (is_ld) {
30
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
31
} else {
32
ofs = offsetof(CPUTLBEntry, addr_write);
33
}
34
- if (TARGET_LONG_BITS == 32) {
35
+ if (addr_type == TCG_TYPE_I32) {
36
tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
37
} else {
38
tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
39
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
40
tcg_out_insn(s, RXY, LG, h->index, TCG_TMP0, TCG_REG_NONE,
41
offsetof(CPUTLBEntry, addend));
42
43
- if (TARGET_LONG_BITS == 32) {
44
+ if (addr_type == TCG_TYPE_I32) {
45
tcg_out_insn(s, RRE, ALGFR, h->index, addr_reg);
46
h->base = TCG_REG_NONE;
47
} else {
48
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
49
}
50
51
h->base = addr_reg;
52
- if (TARGET_LONG_BITS == 32) {
53
+ if (addr_type == TCG_TYPE_I32) {
54
tcg_out_ext32u(s, TCG_TMP0, addr_reg);
55
h->base = TCG_TMP0;
56
}
57
--
58
2.34.1
59
60
diff view generated by jsdifflib
New patch
1
All uses replaced with TCGContext.addr_type.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/sparc64/tcg-target.c.inc | 7 ++++---
7
1 file changed, 4 insertions(+), 3 deletions(-)
8
9
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/sparc64/tcg-target.c.inc
12
+++ b/tcg/sparc64/tcg-target.c.inc
13
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
14
TCGReg addr_reg, MemOpIdx oi,
15
bool is_ld)
16
{
17
+ TCGType addr_type = s->addr_type;
18
TCGLabelQemuLdst *ldst = NULL;
19
MemOp opc = get_memop(oi);
20
MemOp s_bits = opc & MO_SIZE;
21
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
22
tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T3, ARITH_ADD);
23
24
/* Load the tlb comparator and the addend. */
25
- tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_T2, TCG_REG_T1, cmp_off);
26
+ tcg_out_ld(s, addr_type, TCG_REG_T2, TCG_REG_T1, cmp_off);
27
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T1, TCG_REG_T1, add_off);
28
h->base = TCG_REG_T1;
29
30
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
31
ldst->label_ptr[0] = s->code_ptr;
32
33
/* bne,pn %[xi]cc, label0 */
34
- cc = TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC;
35
+ cc = addr_type == TCG_TYPE_I32 ? BPCC_ICC : BPCC_XCC;
36
tcg_out_bpcc0(s, COND_NE, BPCC_PN | cc, 0);
37
#else
38
/*
39
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
40
#endif
41
42
/* If the guest address must be zero-extended, do in the delay slot. */
43
- if (TARGET_LONG_BITS == 32) {
44
+ if (addr_type == TCG_TYPE_I32) {
45
tcg_out_ext32u(s, TCG_REG_T2, addr_reg);
46
h->index = TCG_REG_T2;
47
} else {
48
--
49
2.34.1
50
51
diff view generated by jsdifflib
New patch
1
Removes the only use of TARGET_LONG_BITS from tcg.h, which is to be
2
target independent. Move the symbol to a define in tcg-op.h, which
3
will continue to be target dependent. Rather than complicate matters
4
for the use in tb_gen_code(), expand the definition there.
1
5
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
include/tcg/tcg-op.h | 8 ++++++++
10
include/tcg/tcg.h | 7 -------
11
accel/tcg/translate-all.c | 2 +-
12
3 files changed, 9 insertions(+), 8 deletions(-)
13
14
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/tcg/tcg-op.h
17
+++ b/include/tcg/tcg-op.h
18
@@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi)
19
#error must include QEMU headers
20
#endif
21
22
+#if TARGET_LONG_BITS == 32
23
+# define TCG_TYPE_TL TCG_TYPE_I32
24
+#elif TARGET_LONG_BITS == 64
25
+# define TCG_TYPE_TL TCG_TYPE_I64
26
+#else
27
+# error
28
+#endif
29
+
30
#if TARGET_INSN_START_WORDS == 1
31
static inline void tcg_gen_insn_start(target_ulong pc)
32
{
33
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
34
index XXXXXXX..XXXXXXX 100644
35
--- a/include/tcg/tcg.h
36
+++ b/include/tcg/tcg.h
37
@@ -XXX,XX +XXX,XX @@ typedef enum TCGType {
38
#else
39
TCG_TYPE_PTR = TCG_TYPE_I64,
40
#endif
41
-
42
- /* An alias for the size of the target "long", aka register. */
43
-#if TARGET_LONG_BITS == 64
44
- TCG_TYPE_TL = TCG_TYPE_I64,
45
-#else
46
- TCG_TYPE_TL = TCG_TYPE_I32,
47
-#endif
48
} TCGType;
49
50
/**
51
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
52
index XXXXXXX..XXXXXXX 100644
53
--- a/accel/tcg/translate-all.c
54
+++ b/accel/tcg/translate-all.c
55
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
56
tb_set_page_addr0(tb, phys_pc);
57
tb_set_page_addr1(tb, -1);
58
tcg_ctx->gen_tb = tb;
59
- tcg_ctx->addr_type = TCG_TYPE_TL;
60
+ tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
61
#ifdef CONFIG_SOFTMMU
62
tcg_ctx->page_bits = TARGET_PAGE_BITS;
63
tcg_ctx->page_mask = TARGET_PAGE_MASK;
64
--
65
2.34.1
66
67
diff view generated by jsdifflib
1
This structure will shortly contain more than just
1
This makes CPUTLBEntry agnostic to the address size of the guest.
2
data for accessing MMIO. Rename the 'addr' member
2
When 32-bit addresses are in effect, we can simply read the low
3
to 'xlat_section' to more clearly indicate its purpose.
3
32 bits of the 64-bit field. Similarly when we need to update
4
4
the field for setting TLB_NOTDIRTY.
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
6
For TCG backends that could in theory be big-endian, but in
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
7
practice are not (arm, loongarch, riscv), use QEMU_BUILD_BUG_ON
8
to document and ensure this is not accidentally missed.
9
10
For s390x, which is always big-endian, use HOST_BIG_ENDIAN anyway,
11
to document the reason for the adjustment.
12
13
For sparc64 and ppc64, always perform a 64-bit load, and rely on
14
the following 32-bit comparison to ignore the high bits.
15
16
Rearrange mips and ppc if ladders for clarity.
17
18
Reviewed-by: Anton Johansson <anjo@rev.ng>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
19
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
20
---
10
include/exec/cpu-defs.h | 22 ++++----
21
include/exec/cpu-defs.h | 37 +++++++++++---------------------
11
accel/tcg/cputlb.c | 102 +++++++++++++++++++------------------
22
include/exec/cpu_ldst.h | 19 ++++++++++------
12
target/arm/mte_helper.c | 14 ++---
23
accel/tcg/cputlb.c | 8 +++++--
13
target/arm/sve_helper.c | 4 +-
24
tcg/aarch64/tcg-target.c.inc | 1 +
14
target/arm/translate-a64.c | 2 +-
25
tcg/arm/tcg-target.c.inc | 1 +
15
5 files changed, 73 insertions(+), 71 deletions(-)
26
tcg/loongarch64/tcg-target.c.inc | 1 +
27
tcg/mips/tcg-target.c.inc | 13 ++++++-----
28
tcg/ppc/tcg-target.c.inc | 28 +++++++++++++-----------
29
tcg/riscv/tcg-target.c.inc | 1 +
30
tcg/s390x/tcg-target.c.inc | 1 +
31
tcg/sparc64/tcg-target.c.inc | 8 +++++--
32
11 files changed, 67 insertions(+), 51 deletions(-)
16
33
17
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
34
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
18
index XXXXXXX..XXXXXXX 100644
35
index XXXXXXX..XXXXXXX 100644
19
--- a/include/exec/cpu-defs.h
36
--- a/include/exec/cpu-defs.h
20
+++ b/include/exec/cpu-defs.h
37
+++ b/include/exec/cpu-defs.h
21
@@ -XXX,XX +XXX,XX @@ typedef uint64_t target_ulong;
38
@@ -XXX,XX +XXX,XX @@
22
# endif
39
/* use a fully associative victim tlb of 8 entries */
40
#define CPU_VTLB_SIZE 8
41
42
-#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
43
-#define CPU_TLB_ENTRY_BITS 4
44
-#else
45
#define CPU_TLB_ENTRY_BITS 5
46
-#endif
47
48
#define CPU_TLB_DYN_MIN_BITS 6
49
#define CPU_TLB_DYN_DEFAULT_BITS 8
50
@@ -XXX,XX +XXX,XX @@
23
# endif
51
# endif
24
52
25
+/* Minimalized TLB entry for use by TCG fast path. */
53
/* Minimalized TLB entry for use by TCG fast path. */
26
typedef struct CPUTLBEntry {
54
-typedef struct CPUTLBEntry {
27
/* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
55
- /* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
28
bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
56
- bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
29
@@ -XXX,XX +XXX,XX @@ typedef struct CPUTLBEntry {
57
- go directly to ram.
58
- bit 3 : indicates that the entry is invalid
59
- bit 2..0 : zero
60
- */
61
- union {
62
- struct {
63
- target_ulong addr_read;
64
- target_ulong addr_write;
65
- target_ulong addr_code;
66
- /* Addend to virtual address to get host address. IO accesses
67
- use the corresponding iotlb value. */
68
- uintptr_t addend;
69
- };
70
+typedef union CPUTLBEntry {
71
+ struct {
72
+ uint64_t addr_read;
73
+ uint64_t addr_write;
74
+ uint64_t addr_code;
75
/*
76
- * Padding to get a power of two size, as well as index
77
- * access to addr_{read,write,code}.
78
+ * Addend to virtual address to get host address. IO accesses
79
+ * use the corresponding iotlb value.
80
*/
81
- target_ulong addr_idx[(1 << CPU_TLB_ENTRY_BITS) / TARGET_LONG_SIZE];
82
+ uintptr_t addend;
83
};
84
+ /*
85
+ * Padding to get a power of two size, as well as index
86
+ * access to addr_{read,write,code}.
87
+ */
88
+ uint64_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uint64_t)];
89
} CPUTLBEntry;
30
90
31
QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
91
QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
32
92
33
-/* The IOTLB is not accessed directly inline by generated TCG code,
93
-
34
- * so the CPUIOTLBEntry layout is not as critical as that of the
94
#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */
35
- * CPUTLBEntry. (This is also why we don't want to combine the two
95
36
- * structs into one.)
96
#if !defined(CONFIG_USER_ONLY)
37
+/*
97
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
38
+ * The full TLB entry, which is not accessed by generated TCG code,
98
index XXXXXXX..XXXXXXX 100644
39
+ * so the layout is not as critical as that of CPUTLBEntry. This is
99
--- a/include/exec/cpu_ldst.h
40
+ * also why we don't want to combine the two structs.
100
+++ b/include/exec/cpu_ldst.h
41
*/
101
@@ -XXX,XX +XXX,XX @@ static inline target_ulong tlb_read_idx(const CPUTLBEntry *entry,
42
-typedef struct CPUIOTLBEntry {
102
{
43
+typedef struct CPUTLBEntryFull {
103
/* Do not rearrange the CPUTLBEntry structure members. */
44
/*
104
QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
45
- * @addr contains:
105
- MMU_DATA_LOAD * TARGET_LONG_SIZE);
46
+ * @xlat_section contains:
106
+ MMU_DATA_LOAD * sizeof(uint64_t));
47
* - in the lower TARGET_PAGE_BITS, a physical section number
107
QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
48
* - with the lower TARGET_PAGE_BITS masked off, an offset which
108
- MMU_DATA_STORE * TARGET_LONG_SIZE);
49
* must be added to the virtual address to obtain:
109
+ MMU_DATA_STORE * sizeof(uint64_t));
50
@@ -XXX,XX +XXX,XX @@ typedef struct CPUIOTLBEntry {
110
QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
51
* number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
111
- MMU_INST_FETCH * TARGET_LONG_SIZE);
52
* + the offset within the target MemoryRegion (otherwise)
112
+ MMU_INST_FETCH * sizeof(uint64_t));
53
*/
113
54
- hwaddr addr;
114
- const target_ulong *ptr = &entry->addr_idx[access_type];
55
+ hwaddr xlat_section;
115
-#if TCG_OVERSIZED_GUEST
56
MemTxAttrs attrs;
116
- return *ptr;
57
-} CPUIOTLBEntry;
117
+#if TARGET_LONG_BITS == 32
58
+} CPUTLBEntryFull;
118
+ /* Use qatomic_read, in case of addr_write; only care about low bits. */
59
119
+ const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
60
/*
120
+ ptr += HOST_BIG_ENDIAN;
61
* Data elements that are per MMU mode, minus the bits accessed by
121
+ return qatomic_read(ptr);
62
@@ -XXX,XX +XXX,XX @@ typedef struct CPUTLBDesc {
122
#else
63
size_t vindex;
123
+ const uint64_t *ptr = &entry->addr_idx[access_type];
64
/* The tlb victim table, in two parts. */
124
+# if TCG_OVERSIZED_GUEST
65
CPUTLBEntry vtable[CPU_VTLB_SIZE];
125
+ return *ptr;
66
- CPUIOTLBEntry viotlb[CPU_VTLB_SIZE];
126
+# else
67
- /* The iotlb. */
127
/* ofs might correspond to .addr_write, so use qatomic_read */
68
- CPUIOTLBEntry *iotlb;
128
return qatomic_read(ptr);
69
+ CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
129
+# endif
70
+ CPUTLBEntryFull *fulltlb;
130
#endif
71
} CPUTLBDesc;
131
}
72
132
73
/*
74
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
133
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
75
index XXXXXXX..XXXXXXX 100644
134
index XXXXXXX..XXXXXXX 100644
76
--- a/accel/tcg/cputlb.c
135
--- a/accel/tcg/cputlb.c
77
+++ b/accel/tcg/cputlb.c
136
+++ b/accel/tcg/cputlb.c
78
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
137
@@ -XXX,XX +XXX,XX @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
79
}
138
addr &= TARGET_PAGE_MASK;
80
139
addr += tlb_entry->addend;
81
g_free(fast->table);
140
if ((addr - start) < length) {
82
- g_free(desc->iotlb);
141
-#if TCG_OVERSIZED_GUEST
83
+ g_free(desc->fulltlb);
142
+#if TARGET_LONG_BITS == 32
84
143
+ uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
85
tlb_window_reset(desc, now, 0);
144
+ ptr_write += HOST_BIG_ENDIAN;
86
/* desc->n_used_entries is cleared by the caller */
145
+ qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
87
fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
146
+#elif TCG_OVERSIZED_GUEST
88
fast->table = g_try_new(CPUTLBEntry, new_size);
147
tlb_entry->addr_write |= TLB_NOTDIRTY;
89
- desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
148
#else
90
+ desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
149
qatomic_set(&tlb_entry->addr_write,
150
- tlb_entry->addr_write | TLB_NOTDIRTY);
151
+ tlb_entry->addr_write | TLB_NOTDIRTY);
152
#endif
153
}
154
}
155
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
156
index XXXXXXX..XXXXXXX 100644
157
--- a/tcg/aarch64/tcg-target.c.inc
158
+++ b/tcg/aarch64/tcg-target.c.inc
159
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
160
tcg_out_insn(s, 3502, ADD, 1, TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP0);
161
162
/* Load the tlb comparator into TMP0, and the fast path addend into TMP1. */
163
+ QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
164
tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP1,
165
is_ld ? offsetof(CPUTLBEntry, addr_read)
166
: offsetof(CPUTLBEntry, addr_write));
167
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
168
index XXXXXXX..XXXXXXX 100644
169
--- a/tcg/arm/tcg-target.c.inc
170
+++ b/tcg/arm/tcg-target.c.inc
171
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
172
* Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
173
* Load the tlb comparator into R2/R3 and the fast path addend into R1.
174
*/
175
+ QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
176
if (cmp_off == 0) {
177
if (s->addr_type == TCG_TYPE_I32) {
178
tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
179
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
180
index XXXXXXX..XXXXXXX 100644
181
--- a/tcg/loongarch64/tcg-target.c.inc
182
+++ b/tcg/loongarch64/tcg-target.c.inc
183
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
184
tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
185
186
/* Load the tlb comparator and the addend. */
187
+ QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
188
tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
189
is_ld ? offsetof(CPUTLBEntry, addr_read)
190
: offsetof(CPUTLBEntry, addr_write));
191
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
192
index XXXXXXX..XXXXXXX 100644
193
--- a/tcg/mips/tcg-target.c.inc
194
+++ b/tcg/mips/tcg-target.c.inc
195
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
196
/* Add the tlb_table pointer, creating the CPUTLBEntry address in TMP3. */
197
tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1);
198
199
+ if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) {
200
+ /* Load the (low half) tlb comparator. */
201
+ tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3,
202
+ cmp_off + HOST_BIG_ENDIAN * 4);
203
+ } else {
204
+ tcg_out_ld(s, TCG_TYPE_I64, TCG_TMP0, TCG_TMP3, cmp_off);
205
+ }
206
+
207
if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
208
- /* Load the tlb comparator. */
209
- tcg_out_ld(s, addr_type, TCG_TMP0, TCG_TMP3, cmp_off);
210
/* Load the tlb addend for the fast path. */
211
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP3, TCG_TMP3, add_off);
212
- } else {
213
- /* Load the low half of the tlb comparator. */
214
- tcg_out_ldst(s, OPC_LW, TCG_TMP0, TCG_TMP3, cmp_off + LO_OFF);
215
}
91
216
92
/*
217
/*
93
* If the allocations fail, try smaller sizes. We just freed some
218
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
94
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
219
index XXXXXXX..XXXXXXX 100644
95
* allocations to fail though, so we progressively reduce the allocation
220
--- a/tcg/ppc/tcg-target.c.inc
96
* size, aborting if we cannot even allocate the smallest TLB we support.
221
+++ b/tcg/ppc/tcg-target.c.inc
97
*/
222
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
98
- while (fast->table == NULL || desc->iotlb == NULL) {
223
}
99
+ while (fast->table == NULL || desc->fulltlb == NULL) {
224
tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
100
if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
225
101
error_report("%s: %s", __func__, strerror(errno));
226
- /* Load the (low part) TLB comparator into TMP2. */
102
abort();
227
- if (cmp_off == 0
103
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
228
- && (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32)) {
104
fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
229
- uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32
105
230
- ? LWZUX : LDUX);
106
g_free(fast->table);
231
- tcg_out32(s, lxu | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
107
- g_free(desc->iotlb);
232
+ /*
108
+ g_free(desc->fulltlb);
233
+ * Load the (low part) TLB comparator into TMP2.
109
fast->table = g_try_new(CPUTLBEntry, new_size);
234
+ * For 64-bit host, always load the entire 64-bit slot for simplicity.
110
- desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
235
+ * We will ignore the high bits with tcg_out_cmp(..., addr_type).
111
+ desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
236
+ */
112
}
237
+ if (TCG_TARGET_REG_BITS == 64) {
113
}
238
+ if (cmp_off == 0) {
114
239
+ tcg_out32(s, LDUX | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
115
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
240
+ } else {
116
desc->n_used_entries = 0;
241
+ tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
117
fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
242
+ tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off);
118
fast->table = g_new(CPUTLBEntry, n_entries);
243
+ }
119
- desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
244
+ } else if (cmp_off == 0 && !HOST_BIG_ENDIAN) {
120
+ desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
245
+ tcg_out32(s, LWZUX | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
121
tlb_mmu_flush_locked(desc, fast);
246
} else {
122
}
247
tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
123
248
- if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
124
@@ -XXX,XX +XXX,XX @@ void tlb_destroy(CPUState *cpu)
249
- tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2,
125
CPUTLBDescFast *fast = &env_tlb(env)->f[i];
250
- TCG_REG_TMP1, cmp_off + 4 * HOST_BIG_ENDIAN);
126
251
- } else {
127
g_free(fast->table);
252
- tcg_out_ld(s, addr_type, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off);
128
- g_free(desc->iotlb);
253
- }
129
+ g_free(desc->fulltlb);
254
+ tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
130
}
255
+ cmp_off + 4 * HOST_BIG_ENDIAN);
131
}
256
}
132
133
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
134
135
/* Evict the old entry into the victim tlb. */
136
copy_tlb_helper_locked(tv, te);
137
- desc->viotlb[vidx] = desc->iotlb[index];
138
+ desc->vfulltlb[vidx] = desc->fulltlb[index];
139
tlb_n_used_entries_dec(env, mmu_idx);
140
}
141
142
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
143
* subtract here is that of the page base, and not the same as the
144
* vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
145
*/
146
- desc->iotlb[index].addr = iotlb - vaddr_page;
147
- desc->iotlb[index].attrs = attrs;
148
+ desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
149
+ desc->fulltlb[index].attrs = attrs;
150
151
/* Now calculate the new entry */
152
tn.addend = addend - vaddr_page;
153
@@ -XXX,XX +XXX,XX @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
154
}
155
}
156
157
-static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
158
+static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
159
int mmu_idx, target_ulong addr, uintptr_t retaddr,
160
MMUAccessType access_type, MemOp op)
161
{
162
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
163
bool locked = false;
164
MemTxResult r;
165
166
- section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
167
+ section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
168
mr = section->mr;
169
- mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
170
+ mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
171
cpu->mem_io_pc = retaddr;
172
if (!cpu->can_do_io) {
173
cpu_io_recompile(cpu, retaddr);
174
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
175
qemu_mutex_lock_iothread();
176
locked = true;
177
}
178
- r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
179
+ r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
180
if (r != MEMTX_OK) {
181
hwaddr physaddr = mr_offset +
182
section->offset_within_address_space -
183
section->offset_within_region;
184
185
cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
186
- mmu_idx, iotlbentry->attrs, r, retaddr);
187
+ mmu_idx, full->attrs, r, retaddr);
188
}
189
if (locked) {
190
qemu_mutex_unlock_iothread();
191
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
192
}
193
194
/*
195
- * Save a potentially trashed IOTLB entry for later lookup by plugin.
196
- * This is read by tlb_plugin_lookup if the iotlb entry doesn't match
197
+ * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
198
+ * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
199
* because of the side effect of io_writex changing memory layout.
200
*/
201
static void save_iotlb_data(CPUState *cs, hwaddr addr,
202
@@ -XXX,XX +XXX,XX @@ static void save_iotlb_data(CPUState *cs, hwaddr addr,
203
#endif
204
}
205
206
-static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
207
+static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
208
int mmu_idx, uint64_t val, target_ulong addr,
209
uintptr_t retaddr, MemOp op)
210
{
211
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
212
bool locked = false;
213
MemTxResult r;
214
215
- section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
216
+ section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
217
mr = section->mr;
218
- mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
219
+ mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
220
if (!cpu->can_do_io) {
221
cpu_io_recompile(cpu, retaddr);
222
}
223
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
224
* The memory_region_dispatch may trigger a flush/resize
225
* so for plugins we save the iotlb_data just in case.
226
*/
227
- save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset);
228
+ save_iotlb_data(cpu, full->xlat_section, section, mr_offset);
229
230
if (!qemu_mutex_iothread_locked()) {
231
qemu_mutex_lock_iothread();
232
locked = true;
233
}
234
- r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
235
+ r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
236
if (r != MEMTX_OK) {
237
hwaddr physaddr = mr_offset +
238
section->offset_within_address_space -
239
section->offset_within_region;
240
241
cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
242
- MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
243
+ MMU_DATA_STORE, mmu_idx, full->attrs, r,
244
retaddr);
245
}
246
if (locked) {
247
@@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
248
copy_tlb_helper_locked(vtlb, &tmptlb);
249
qemu_spin_unlock(&env_tlb(env)->c.lock);
250
251
- CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
252
- CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
253
- tmpio = *io; *io = *vio; *vio = tmpio;
254
+ CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
255
+ CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
256
+ CPUTLBEntryFull tmpf;
257
+ tmpf = *f1; *f1 = *f2; *f2 = tmpf;
258
return true;
259
}
260
}
261
@@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
262
(ADDR) & TARGET_PAGE_MASK)
263
264
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
265
- CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
266
+ CPUTLBEntryFull *full, uintptr_t retaddr)
267
{
268
- ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
269
+ ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
270
271
trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
272
273
@@ -XXX,XX +XXX,XX @@ int probe_access_flags(CPUArchState *env, target_ulong addr,
274
/* Handle clean RAM pages. */
275
if (unlikely(flags & TLB_NOTDIRTY)) {
276
uintptr_t index = tlb_index(env, mmu_idx, addr);
277
- CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
278
+ CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
279
280
- notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
281
+ notdirty_write(env_cpu(env), addr, 1, full, retaddr);
282
flags &= ~TLB_NOTDIRTY;
283
}
284
285
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
286
287
if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
288
uintptr_t index = tlb_index(env, mmu_idx, addr);
289
- CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
290
+ CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
291
292
/* Handle watchpoints. */
293
if (flags & TLB_WATCHPOINT) {
294
int wp_access = (access_type == MMU_DATA_STORE
295
? BP_MEM_WRITE : BP_MEM_READ);
296
cpu_check_watchpoint(env_cpu(env), addr, size,
297
- iotlbentry->attrs, wp_access, retaddr);
298
+ full->attrs, wp_access, retaddr);
299
}
300
301
/* Handle clean RAM pages. */
302
if (flags & TLB_NOTDIRTY) {
303
- notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
304
+ notdirty_write(env_cpu(env), addr, 1, full, retaddr);
305
}
306
}
307
308
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
309
* should have just filled the TLB. The one corner case is io_writex
310
* which can cause TLB flushes and potential resizing of the TLBs
311
* losing the information we need. In those cases we need to recover
312
- * data from a copy of the iotlbentry. As long as this always occurs
313
+ * data from a copy of the CPUTLBEntryFull. As long as this always occurs
314
* from the same thread (which a mem callback will be) this is safe.
315
*/
316
317
@@ -XXX,XX +XXX,XX @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
318
if (likely(tlb_hit(tlb_addr, addr))) {
319
/* We must have an iotlb entry for MMIO */
320
if (tlb_addr & TLB_MMIO) {
321
- CPUIOTLBEntry *iotlbentry;
322
- iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
323
+ CPUTLBEntryFull *full;
324
+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
325
data->is_io = true;
326
- data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
327
- data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
328
+ data->v.io.section =
329
+ iotlb_to_section(cpu, full->xlat_section, full->attrs);
330
+ data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
331
} else {
332
data->is_io = false;
333
data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
334
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
335
336
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
337
notdirty_write(env_cpu(env), addr, size,
338
- &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
339
+ &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr);
340
}
341
342
return hostaddr;
343
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
344
345
/* Handle anything that isn't just a straight memory access. */
346
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
347
- CPUIOTLBEntry *iotlbentry;
348
+ CPUTLBEntryFull *full;
349
bool need_swap;
350
351
/* For anything that is unaligned, recurse through full_load. */
352
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
353
goto do_unaligned_access;
354
}
355
356
- iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
357
+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
358
359
/* Handle watchpoints. */
360
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
361
/* On watchpoint hit, this will longjmp out. */
362
cpu_check_watchpoint(env_cpu(env), addr, size,
363
- iotlbentry->attrs, BP_MEM_READ, retaddr);
364
+ full->attrs, BP_MEM_READ, retaddr);
365
}
366
367
need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
368
369
/* Handle I/O access. */
370
if (likely(tlb_addr & TLB_MMIO)) {
371
- return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
372
+ return io_readx(env, full, mmu_idx, addr, retaddr,
373
access_type, op ^ (need_swap * MO_BSWAP));
374
}
375
376
@@ -XXX,XX +XXX,XX @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
377
*/
378
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
379
cpu_check_watchpoint(env_cpu(env), addr, size - size2,
380
- env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
381
+ env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
382
BP_MEM_WRITE, retaddr);
383
}
384
if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
385
cpu_check_watchpoint(env_cpu(env), page2, size2,
386
- env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
387
+ env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
388
BP_MEM_WRITE, retaddr);
389
}
390
391
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
392
393
/* Handle anything that isn't just a straight memory access. */
394
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
395
- CPUIOTLBEntry *iotlbentry;
396
+ CPUTLBEntryFull *full;
397
bool need_swap;
398
399
/* For anything that is unaligned, recurse through byte stores. */
400
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
401
goto do_unaligned_access;
402
}
403
404
- iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
405
+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
406
407
/* Handle watchpoints. */
408
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
409
/* On watchpoint hit, this will longjmp out. */
410
cpu_check_watchpoint(env_cpu(env), addr, size,
411
- iotlbentry->attrs, BP_MEM_WRITE, retaddr);
412
+ full->attrs, BP_MEM_WRITE, retaddr);
413
}
414
415
need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
416
417
/* Handle I/O access. */
418
if (tlb_addr & TLB_MMIO) {
419
- io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
420
+ io_writex(env, full, mmu_idx, val, addr, retaddr,
421
op ^ (need_swap * MO_BSWAP));
422
return;
423
}
424
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
425
426
/* Handle clean RAM pages. */
427
if (tlb_addr & TLB_NOTDIRTY) {
428
- notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
429
+ notdirty_write(env_cpu(env), addr, size, full, retaddr);
430
}
431
432
haddr = (void *)((uintptr_t)addr + entry->addend);
433
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
434
index XXXXXXX..XXXXXXX 100644
435
--- a/target/arm/mte_helper.c
436
+++ b/target/arm/mte_helper.c
437
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
438
return tags + index;
439
#else
440
uintptr_t index;
441
- CPUIOTLBEntry *iotlbentry;
442
+ CPUTLBEntryFull *full;
443
int in_page, flags;
444
ram_addr_t ptr_ra;
445
hwaddr ptr_paddr, tag_paddr, xlat;
446
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
447
assert(!(flags & TLB_INVALID_MASK));
448
257
449
/*
258
/*
450
- * Find the iotlbentry for ptr. This *must* be present in the TLB
259
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
451
+ * Find the CPUTLBEntryFull for ptr. This *must* be present in the TLB
260
index XXXXXXX..XXXXXXX 100644
452
* because we just found the mapping.
261
--- a/tcg/riscv/tcg-target.c.inc
453
* TODO: Perhaps there should be a cputlb helper that returns a
262
+++ b/tcg/riscv/tcg-target.c.inc
454
* matching tlb entry + iotlb entry.
263
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
455
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
264
}
456
g_assert(tlb_hit(comparator, ptr));
265
457
}
266
/* Load the tlb comparator and the addend. */
458
# endif
267
+ QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
459
- iotlbentry = &env_tlb(env)->d[ptr_mmu_idx].iotlb[index];
268
tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
460
+ full = &env_tlb(env)->d[ptr_mmu_idx].fulltlb[index];
269
is_ld ? offsetof(CPUTLBEntry, addr_read)
461
270
: offsetof(CPUTLBEntry, addr_write));
462
/* If the virtual page MemAttr != Tagged, access unchecked. */
271
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
463
- if (!arm_tlb_mte_tagged(&iotlbentry->attrs)) {
272
index XXXXXXX..XXXXXXX 100644
464
+ if (!arm_tlb_mte_tagged(&full->attrs)) {
273
--- a/tcg/s390x/tcg-target.c.inc
465
return NULL;
274
+++ b/tcg/s390x/tcg-target.c.inc
466
}
275
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
467
276
ofs = offsetof(CPUTLBEntry, addr_write);
468
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
277
}
469
int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
278
if (addr_type == TCG_TYPE_I32) {
470
assert(ra != 0);
279
+ ofs += HOST_BIG_ENDIAN * 4;
471
cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
280
tcg_out_insn(s, RX, C, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
472
- iotlbentry->attrs, wp, ra);
281
} else {
473
+ full->attrs, wp, ra);
282
tcg_out_insn(s, RXY, CG, TCG_REG_R0, TCG_TMP0, TCG_REG_NONE, ofs);
474
}
283
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
475
284
index XXXXXXX..XXXXXXX 100644
476
/*
285
--- a/tcg/sparc64/tcg-target.c.inc
477
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
286
+++ b/tcg/sparc64/tcg-target.c.inc
478
tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);
287
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
479
288
/* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */
480
/* Look up the address in tag space. */
289
tcg_out_arith(s, TCG_REG_T1, TCG_REG_T1, TCG_REG_T3, ARITH_ADD);
481
- tag_asi = iotlbentry->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
290
482
+ tag_asi = full->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
291
- /* Load the tlb comparator and the addend. */
483
tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
292
- tcg_out_ld(s, addr_type, TCG_REG_T2, TCG_REG_T1, cmp_off);
484
mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
293
+ /*
485
tag_access == MMU_DATA_STORE,
294
+ * Load the tlb comparator and the addend.
486
- iotlbentry->attrs);
295
+ * Always load the entire 64-bit comparator for simplicity.
487
+ full->attrs);
296
+ * We will ignore the high bits via BPCC_ICC below.
488
297
+ */
489
/*
298
+ tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_T2, TCG_REG_T1, cmp_off);
490
* Note that @mr will never be NULL. If there is nothing in the address
299
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T1, TCG_REG_T1, add_off);
491
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
300
h->base = TCG_REG_T1;
492
index XXXXXXX..XXXXXXX 100644
493
--- a/target/arm/sve_helper.c
494
+++ b/target/arm/sve_helper.c
495
@@ -XXX,XX +XXX,XX @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
496
g_assert(tlb_hit(comparator, addr));
497
# endif
498
499
- CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
500
- info->attrs = iotlbentry->attrs;
501
+ CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
502
+ info->attrs = full->attrs;
503
}
504
#endif
505
506
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
507
index XXXXXXX..XXXXXXX 100644
508
--- a/target/arm/translate-a64.c
509
+++ b/target/arm/translate-a64.c
510
@@ -XXX,XX +XXX,XX @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s)
511
* table entry even for that case.
512
*/
513
return (tlb_hit(entry->addr_code, addr) &&
514
- arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs));
515
+ arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].fulltlb[index].attrs));
516
#endif
517
}
518
301
519
--
302
--
520
2.34.1
303
2.34.1
521
522
diff view generated by jsdifflib
1
The availability of tb->pc will shortly be conditional.
1
Disconnect the layout of ArchCPU from TCG compilation.
2
Introduce accessor functions to minimize ifdefs.
2
Pass the relative offset of 'env' and 'neg.tlb.f' as a parameter.
3
3
4
Pass around a known pc to places like tcg_gen_code,
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
where the caller must already have the value.
6
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
6
---
10
accel/tcg/internal.h | 6 ++++
7
include/exec/cpu-defs.h | 39 +---------------------
11
include/exec/exec-all.h | 6 ++++
8
include/exec/tlb-common.h | 56 ++++++++++++++++++++++++++++++++
12
include/tcg/tcg.h | 2 +-
9
include/tcg/tcg.h | 1 +
13
accel/tcg/cpu-exec.c | 46 ++++++++++++++-----------
10
accel/tcg/translate-all.c | 2 ++
14
accel/tcg/translate-all.c | 37 +++++++++++---------
11
tcg/tcg.c | 13 ++++++++
15
target/arm/cpu.c | 4 +--
12
tcg/aarch64/tcg-target.c.inc | 7 ++--
16
target/avr/cpu.c | 2 +-
13
tcg/arm/tcg-target.c.inc | 7 ++--
17
target/hexagon/cpu.c | 2 +-
14
tcg/i386/tcg-target.c.inc | 9 ++---
18
target/hppa/cpu.c | 4 +--
15
tcg/loongarch64/tcg-target.c.inc | 7 ++--
19
target/i386/tcg/tcg-cpu.c | 2 +-
16
tcg/mips/tcg-target.c.inc | 7 ++--
20
target/loongarch/cpu.c | 2 +-
17
tcg/ppc/tcg-target.c.inc | 7 ++--
21
target/microblaze/cpu.c | 2 +-
18
tcg/riscv/tcg-target.c.inc | 7 ++--
22
target/mips/tcg/exception.c | 2 +-
19
tcg/s390x/tcg-target.c.inc | 7 ++--
23
target/mips/tcg/sysemu/special_helper.c | 2 +-
20
tcg/sparc64/tcg-target.c.inc | 7 ++--
24
target/openrisc/cpu.c | 2 +-
21
14 files changed, 110 insertions(+), 66 deletions(-)
25
target/riscv/cpu.c | 4 +--
22
create mode 100644 include/exec/tlb-common.h
26
target/rx/cpu.c | 2 +-
27
target/sh4/cpu.c | 4 +--
28
target/sparc/cpu.c | 2 +-
29
target/tricore/cpu.c | 2 +-
30
tcg/tcg.c | 8 ++---
31
21 files changed, 82 insertions(+), 61 deletions(-)
32
23
33
diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
24
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
34
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
35
--- a/accel/tcg/internal.h
26
--- a/include/exec/cpu-defs.h
36
+++ b/accel/tcg/internal.h
27
+++ b/include/exec/cpu-defs.h
37
@@ -XXX,XX +XXX,XX @@ G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
28
@@ -XXX,XX +XXX,XX @@
38
void page_init(void);
29
#define NB_MMU_MODES 16
39
void tb_htable_init(void);
30
40
31
#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
41
+/* Return the current PC from CPU, which may be cached in TB. */
32
+#include "exec/tlb-common.h"
42
+static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
33
43
+{
34
/* use a fully associative victim tlb of 8 entries */
44
+ return tb_pc(tb);
35
#define CPU_VTLB_SIZE 8
45
+}
36
46
+
37
-#define CPU_TLB_ENTRY_BITS 5
47
#endif /* ACCEL_TCG_INTERNAL_H */
38
-
48
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
39
#define CPU_TLB_DYN_MIN_BITS 6
49
index XXXXXXX..XXXXXXX 100644
40
#define CPU_TLB_DYN_DEFAULT_BITS 8
50
--- a/include/exec/exec-all.h
41
51
+++ b/include/exec/exec-all.h
42
@@ -XXX,XX +XXX,XX @@
52
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
43
# endif
53
uintptr_t jmp_dest[2];
44
# endif
54
};
45
55
46
-/* Minimalized TLB entry for use by TCG fast path. */
56
+/* Hide the read to avoid ifdefs for TARGET_TB_PCREL. */
47
-typedef union CPUTLBEntry {
57
+static inline target_ulong tb_pc(const TranslationBlock *tb)
48
- struct {
58
+{
49
- uint64_t addr_read;
59
+ return tb->pc;
50
- uint64_t addr_write;
60
+}
51
- uint64_t addr_code;
61
+
52
- /*
62
/* Hide the qatomic_read to make code a little easier on the eyes */
53
- * Addend to virtual address to get host address. IO accesses
63
static inline uint32_t tb_cflags(const TranslationBlock *tb)
54
- * use the corresponding iotlb value.
64
{
55
- */
56
- uintptr_t addend;
57
- };
58
- /*
59
- * Padding to get a power of two size, as well as index
60
- * access to addr_{read,write,code}.
61
- */
62
- uint64_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uint64_t)];
63
-} CPUTLBEntry;
64
-
65
-QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
66
-
67
#endif /* !CONFIG_USER_ONLY && CONFIG_TCG */
68
69
#if !defined(CONFIG_USER_ONLY)
70
@@ -XXX,XX +XXX,XX @@ typedef struct CPUTLBDesc {
71
CPUTLBEntryFull *fulltlb;
72
} CPUTLBDesc;
73
74
-/*
75
- * Data elements that are per MMU mode, accessed by the fast path.
76
- * The structure is aligned to aid loading the pair with one insn.
77
- */
78
-typedef struct CPUTLBDescFast {
79
- /* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */
80
- uintptr_t mask;
81
- /* The array of tlb entries itself. */
82
- CPUTLBEntry *table;
83
-} CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *));
84
-
85
/*
86
* Data elements that are shared between all MMU modes.
87
*/
88
@@ -XXX,XX +XXX,XX @@ typedef struct CPUTLB {
89
CPUTLBDescFast f[NB_MMU_MODES];
90
} CPUTLB;
91
92
-/* This will be used by TCG backends to compute offsets. */
93
-#define TLB_MASK_TABLE_OFS(IDX) \
94
- ((int)offsetof(ArchCPU, neg.tlb.f[IDX]) - (int)offsetof(ArchCPU, env))
95
-
96
#else
97
98
typedef struct CPUTLB { } CPUTLB;
99
diff --git a/include/exec/tlb-common.h b/include/exec/tlb-common.h
100
new file mode 100644
101
index XXXXXXX..XXXXXXX
102
--- /dev/null
103
+++ b/include/exec/tlb-common.h
104
@@ -XXX,XX +XXX,XX @@
105
+/*
106
+ * Common definitions for the softmmu tlb
107
+ *
108
+ * Copyright (c) 2003 Fabrice Bellard
109
+ *
110
+ * This library is free software; you can redistribute it and/or
111
+ * modify it under the terms of the GNU Lesser General Public
112
+ * License as published by the Free Software Foundation; either
113
+ * version 2.1 of the License, or (at your option) any later version.
114
+ *
115
+ * This library is distributed in the hope that it will be useful,
116
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
117
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
118
+ * Lesser General Public License for more details.
119
+ *
120
+ * You should have received a copy of the GNU Lesser General Public
121
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
122
+ */
123
+#ifndef EXEC_TLB_COMMON_H
124
+#define EXEC_TLB_COMMON_H 1
125
+
126
+#define CPU_TLB_ENTRY_BITS 5
127
+
128
+/* Minimalized TLB entry for use by TCG fast path. */
129
+typedef union CPUTLBEntry {
130
+ struct {
131
+ uint64_t addr_read;
132
+ uint64_t addr_write;
133
+ uint64_t addr_code;
134
+ /*
135
+ * Addend to virtual address to get host address. IO accesses
136
+ * use the corresponding iotlb value.
137
+ */
138
+ uintptr_t addend;
139
+ };
140
+ /*
141
+ * Padding to get a power of two size, as well as index
142
+ * access to addr_{read,write,code}.
143
+ */
144
+ uint64_t addr_idx[(1 << CPU_TLB_ENTRY_BITS) / sizeof(uint64_t)];
145
+} CPUTLBEntry;
146
+
147
+QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
148
+
149
+/*
150
+ * Data elements that are per MMU mode, accessed by the fast path.
151
+ * The structure is aligned to aid loading the pair with one insn.
152
+ */
153
+typedef struct CPUTLBDescFast {
154
+ /* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */
155
+ uintptr_t mask;
156
+ /* The array of tlb entries itself. */
157
+ CPUTLBEntry *table;
158
+} CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *));
159
+
160
+#endif /* EXEC_TLB_COMMON_H */
65
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
161
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
66
index XXXXXXX..XXXXXXX 100644
162
index XXXXXXX..XXXXXXX 100644
67
--- a/include/tcg/tcg.h
163
--- a/include/tcg/tcg.h
68
+++ b/include/tcg/tcg.h
164
+++ b/include/tcg/tcg.h
69
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void);
165
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
70
void tcg_prologue_init(TCGContext *s);
166
TCGType addr_type; /* TCG_TYPE_I32 or TCG_TYPE_I64 */
71
void tcg_func_start(TCGContext *s);
167
72
168
#ifdef CONFIG_SOFTMMU
73
-int tcg_gen_code(TCGContext *s, TranslationBlock *tb);
169
+ int tlb_fast_offset;
74
+int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start);
170
int page_mask;
75
171
uint8_t page_bits;
76
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
172
uint8_t tlb_dyn_max_bits;
77
78
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
79
index XXXXXXX..XXXXXXX 100644
80
--- a/accel/tcg/cpu-exec.c
81
+++ b/accel/tcg/cpu-exec.c
82
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
83
const TranslationBlock *tb = p;
84
const struct tb_desc *desc = d;
85
86
- if (tb->pc == desc->pc &&
87
+ if (tb_pc(tb) == desc->pc &&
88
tb->page_addr[0] == desc->page_addr0 &&
89
tb->cs_base == desc->cs_base &&
90
tb->flags == desc->flags &&
91
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
92
return tb;
93
}
94
95
-static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
96
- const TranslationBlock *tb)
97
+static void log_cpu_exec(target_ulong pc, CPUState *cpu,
98
+ const TranslationBlock *tb)
99
{
100
- if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC))
101
- && qemu_log_in_addr_range(pc)) {
102
-
103
+ if (qemu_log_in_addr_range(pc)) {
104
qemu_log_mask(CPU_LOG_EXEC,
105
"Trace %d: %p [" TARGET_FMT_lx
106
"/" TARGET_FMT_lx "/%08x/%08x] %s\n",
107
@@ -XXX,XX +XXX,XX @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
108
return tcg_code_gen_epilogue;
109
}
110
111
- log_cpu_exec(pc, cpu, tb);
112
+ if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
113
+ log_cpu_exec(pc, cpu, tb);
114
+ }
115
116
return tb->tc.ptr;
117
}
118
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
119
TranslationBlock *last_tb;
120
const void *tb_ptr = itb->tc.ptr;
121
122
- log_cpu_exec(itb->pc, cpu, itb);
123
+ if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
124
+ log_cpu_exec(log_pc(cpu, itb), cpu, itb);
125
+ }
126
127
qemu_thread_jit_execute();
128
ret = tcg_qemu_tb_exec(env, tb_ptr);
129
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
130
* of the start of the TB.
131
*/
132
CPUClass *cc = CPU_GET_CLASS(cpu);
133
- qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
134
- "Stopped execution of TB chain before %p ["
135
- TARGET_FMT_lx "] %s\n",
136
- last_tb->tc.ptr, last_tb->pc,
137
- lookup_symbol(last_tb->pc));
138
+
139
if (cc->tcg_ops->synchronize_from_tb) {
140
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
141
} else {
142
assert(cc->set_pc);
143
- cc->set_pc(cpu, last_tb->pc);
144
+ cc->set_pc(cpu, tb_pc(last_tb));
145
+ }
146
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
147
+ target_ulong pc = log_pc(cpu, last_tb);
148
+ if (qemu_log_in_addr_range(pc)) {
149
+ qemu_log("Stopped execution of TB chain before %p ["
150
+ TARGET_FMT_lx "] %s\n",
151
+ last_tb->tc.ptr, pc, lookup_symbol(pc));
152
+ }
153
}
154
}
155
156
@@ -XXX,XX +XXX,XX @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
157
158
qemu_spin_unlock(&tb_next->jmp_lock);
159
160
- qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
161
- "Linking TBs %p [" TARGET_FMT_lx
162
- "] index %d -> %p [" TARGET_FMT_lx "]\n",
163
- tb->tc.ptr, tb->pc, n,
164
- tb_next->tc.ptr, tb_next->pc);
165
+ qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
166
+ tb->tc.ptr, n, tb_next->tc.ptr);
167
return;
168
169
out_unlock_next:
170
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
171
}
172
173
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
174
+ target_ulong pc,
175
TranslationBlock **last_tb, int *tb_exit)
176
{
177
int32_t insns_left;
178
179
- trace_exec_tb(tb, tb->pc);
180
+ trace_exec_tb(tb, pc);
181
tb = cpu_tb_exec(cpu, tb, tb_exit);
182
if (*tb_exit != TB_EXIT_REQUESTED) {
183
*last_tb = tb;
184
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
185
tb_add_jump(last_tb, tb_exit, tb);
186
}
187
188
- cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
189
+ cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
190
191
/* Try to align the host and virtual clocks
192
if the guest is in advance */
193
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
173
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
194
index XXXXXXX..XXXXXXX 100644
174
index XXXXXXX..XXXXXXX 100644
195
--- a/accel/tcg/translate-all.c
175
--- a/accel/tcg/translate-all.c
196
+++ b/accel/tcg/translate-all.c
176
+++ b/accel/tcg/translate-all.c
197
@@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
198
199
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
200
if (i == 0) {
201
- prev = (j == 0 ? tb->pc : 0);
202
+ prev = (j == 0 ? tb_pc(tb) : 0);
203
} else {
204
prev = tcg_ctx->gen_insn_data[i - 1][j];
205
}
206
@@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
207
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
208
uintptr_t searched_pc, bool reset_icount)
209
{
210
- target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
211
+ target_ulong data[TARGET_INSN_START_WORDS] = { tb_pc(tb) };
212
uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
213
CPUArchState *env = cpu->env_ptr;
214
const uint8_t *p = tb->tc.ptr + tb->tc.size;
215
@@ -XXX,XX +XXX,XX @@ static bool tb_cmp(const void *ap, const void *bp)
216
const TranslationBlock *a = ap;
217
const TranslationBlock *b = bp;
218
219
- return a->pc == b->pc &&
220
+ return tb_pc(a) == tb_pc(b) &&
221
a->cs_base == b->cs_base &&
222
a->flags == b->flags &&
223
(tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
224
@@ -XXX,XX +XXX,XX @@ static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
225
TranslationBlock *tb = p;
226
target_ulong addr = *(target_ulong *)userp;
227
228
- if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
229
+ if (!(addr + TARGET_PAGE_SIZE <= tb_pc(tb) ||
230
+ addr >= tb_pc(tb) + tb->size)) {
231
printf("ERROR invalidate: address=" TARGET_FMT_lx
232
- " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
233
+ " PC=%08lx size=%04x\n", addr, (long)tb_pc(tb), tb->size);
234
}
235
}
236
237
@@ -XXX,XX +XXX,XX @@ static void do_tb_page_check(void *p, uint32_t hash, void *userp)
238
TranslationBlock *tb = p;
239
int flags1, flags2;
240
241
- flags1 = page_get_flags(tb->pc);
242
- flags2 = page_get_flags(tb->pc + tb->size - 1);
243
+ flags1 = page_get_flags(tb_pc(tb));
244
+ flags2 = page_get_flags(tb_pc(tb) + tb->size - 1);
245
if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
246
printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
247
- (long)tb->pc, tb->size, flags1, flags2);
248
+ (long)tb_pc(tb), tb->size, flags1, flags2);
249
}
250
}
251
252
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
253
254
/* remove the TB from the hash list */
255
phys_pc = tb->page_addr[0];
256
- h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
257
+ h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, orig_cflags,
258
tb->trace_vcpu_dstate);
259
if (!qht_remove(&tb_ctx.htable, tb, h)) {
260
return;
261
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
262
}
263
264
/* add in the hash table */
265
- h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags,
266
+ h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, tb->cflags,
267
tb->trace_vcpu_dstate);
268
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
269
270
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
177
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
271
tcg_ctx->cpu = NULL;
178
tcg_ctx->page_bits = TARGET_PAGE_BITS;
272
max_insns = tb->icount;
179
tcg_ctx->page_mask = TARGET_PAGE_MASK;
273
180
tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
274
- trace_translate_block(tb, tb->pc, tb->tc.ptr);
181
+ tcg_ctx->tlb_fast_offset =
275
+ trace_translate_block(tb, pc, tb->tc.ptr);
182
+ (int)offsetof(ArchCPU, neg.tlb.f) - (int)offsetof(ArchCPU, env);
276
277
/* generate machine code */
278
tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
279
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
280
ti = profile_getclock();
281
#endif
183
#endif
282
184
283
- gen_code_size = tcg_gen_code(tcg_ctx, tb);
185
tb_overflow:
284
+ gen_code_size = tcg_gen_code(tcg_ctx, tb, pc);
285
if (unlikely(gen_code_size < 0)) {
286
error_return:
287
switch (gen_code_size) {
288
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
289
290
#ifdef DEBUG_DISAS
291
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
292
- qemu_log_in_addr_range(tb->pc)) {
293
+ qemu_log_in_addr_range(pc)) {
294
FILE *logfile = qemu_log_trylock();
295
if (logfile) {
296
int code_size, data_size;
297
@@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
298
*/
299
cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
300
301
- qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
302
- "cpu_io_recompile: rewound execution of TB to "
303
- TARGET_FMT_lx "\n", tb->pc);
304
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
305
+ target_ulong pc = log_pc(cpu, tb);
306
+ if (qemu_log_in_addr_range(pc)) {
307
+ qemu_log("cpu_io_recompile: rewound execution of TB to "
308
+ TARGET_FMT_lx "\n", pc);
309
+ }
310
+ }
311
312
cpu_loop_exit_noexc(cpu);
313
}
314
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
315
index XXXXXXX..XXXXXXX 100644
316
--- a/target/arm/cpu.c
317
+++ b/target/arm/cpu.c
318
@@ -XXX,XX +XXX,XX @@ void arm_cpu_synchronize_from_tb(CPUState *cs,
319
* never possible for an AArch64 TB to chain to an AArch32 TB.
320
*/
321
if (is_a64(env)) {
322
- env->pc = tb->pc;
323
+ env->pc = tb_pc(tb);
324
} else {
325
- env->regs[15] = tb->pc;
326
+ env->regs[15] = tb_pc(tb);
327
}
328
}
329
#endif /* CONFIG_TCG */
330
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
331
index XXXXXXX..XXXXXXX 100644
332
--- a/target/avr/cpu.c
333
+++ b/target/avr/cpu.c
334
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_synchronize_from_tb(CPUState *cs,
335
AVRCPU *cpu = AVR_CPU(cs);
336
CPUAVRState *env = &cpu->env;
337
338
- env->pc_w = tb->pc / 2; /* internally PC points to words */
339
+ env->pc_w = tb_pc(tb) / 2; /* internally PC points to words */
340
}
341
342
static void avr_cpu_reset(DeviceState *ds)
343
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
344
index XXXXXXX..XXXXXXX 100644
345
--- a/target/hexagon/cpu.c
346
+++ b/target/hexagon/cpu.c
347
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_synchronize_from_tb(CPUState *cs,
348
{
349
HexagonCPU *cpu = HEXAGON_CPU(cs);
350
CPUHexagonState *env = &cpu->env;
351
- env->gpr[HEX_REG_PC] = tb->pc;
352
+ env->gpr[HEX_REG_PC] = tb_pc(tb);
353
}
354
355
static bool hexagon_cpu_has_work(CPUState *cs)
356
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
357
index XXXXXXX..XXXXXXX 100644
358
--- a/target/hppa/cpu.c
359
+++ b/target/hppa/cpu.c
360
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs,
361
HPPACPU *cpu = HPPA_CPU(cs);
362
363
#ifdef CONFIG_USER_ONLY
364
- cpu->env.iaoq_f = tb->pc;
365
+ cpu->env.iaoq_f = tb_pc(tb);
366
cpu->env.iaoq_b = tb->cs_base;
367
#else
368
/* Recover the IAOQ values from the GVA + PRIV. */
369
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs,
370
int32_t diff = cs_base;
371
372
cpu->env.iasq_f = iasq_f;
373
- cpu->env.iaoq_f = (tb->pc & ~iasq_f) + priv;
374
+ cpu->env.iaoq_f = (tb_pc(tb) & ~iasq_f) + priv;
375
if (diff) {
376
cpu->env.iaoq_b = cpu->env.iaoq_f + diff;
377
}
378
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
379
index XXXXXXX..XXXXXXX 100644
380
--- a/target/i386/tcg/tcg-cpu.c
381
+++ b/target/i386/tcg/tcg-cpu.c
382
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_synchronize_from_tb(CPUState *cs,
383
{
384
X86CPU *cpu = X86_CPU(cs);
385
386
- cpu->env.eip = tb->pc - tb->cs_base;
387
+ cpu->env.eip = tb_pc(tb) - tb->cs_base;
388
}
389
390
#ifndef CONFIG_USER_ONLY
391
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
392
index XXXXXXX..XXXXXXX 100644
393
--- a/target/loongarch/cpu.c
394
+++ b/target/loongarch/cpu.c
395
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
396
LoongArchCPU *cpu = LOONGARCH_CPU(cs);
397
CPULoongArchState *env = &cpu->env;
398
399
- env->pc = tb->pc;
400
+ env->pc = tb_pc(tb);
401
}
402
#endif /* CONFIG_TCG */
403
404
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
405
index XXXXXXX..XXXXXXX 100644
406
--- a/target/microblaze/cpu.c
407
+++ b/target/microblaze/cpu.c
408
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_synchronize_from_tb(CPUState *cs,
409
{
410
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
411
412
- cpu->env.pc = tb->pc;
413
+ cpu->env.pc = tb_pc(tb);
414
cpu->env.iflags = tb->flags & IFLAGS_TB_MASK;
415
}
416
417
diff --git a/target/mips/tcg/exception.c b/target/mips/tcg/exception.c
418
index XXXXXXX..XXXXXXX 100644
419
--- a/target/mips/tcg/exception.c
420
+++ b/target/mips/tcg/exception.c
421
@@ -XXX,XX +XXX,XX @@ void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb)
422
MIPSCPU *cpu = MIPS_CPU(cs);
423
CPUMIPSState *env = &cpu->env;
424
425
- env->active_tc.PC = tb->pc;
426
+ env->active_tc.PC = tb_pc(tb);
427
env->hflags &= ~MIPS_HFLAG_BMASK;
428
env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
429
}
430
diff --git a/target/mips/tcg/sysemu/special_helper.c b/target/mips/tcg/sysemu/special_helper.c
431
index XXXXXXX..XXXXXXX 100644
432
--- a/target/mips/tcg/sysemu/special_helper.c
433
+++ b/target/mips/tcg/sysemu/special_helper.c
434
@@ -XXX,XX +XXX,XX @@ bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb)
435
CPUMIPSState *env = &cpu->env;
436
437
if ((env->hflags & MIPS_HFLAG_BMASK) != 0
438
- && env->active_tc.PC != tb->pc) {
439
+ && env->active_tc.PC != tb_pc(tb)) {
440
env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
441
env->hflags &= ~MIPS_HFLAG_BMASK;
442
return true;
443
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
444
index XXXXXXX..XXXXXXX 100644
445
--- a/target/openrisc/cpu.c
446
+++ b/target/openrisc/cpu.c
447
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_synchronize_from_tb(CPUState *cs,
448
{
449
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
450
451
- cpu->env.pc = tb->pc;
452
+ cpu->env.pc = tb_pc(tb);
453
}
454
455
456
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
457
index XXXXXXX..XXXXXXX 100644
458
--- a/target/riscv/cpu.c
459
+++ b/target/riscv/cpu.c
460
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_synchronize_from_tb(CPUState *cs,
461
RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
462
463
if (xl == MXL_RV32) {
464
- env->pc = (int32_t)tb->pc;
465
+ env->pc = (int32_t)tb_pc(tb);
466
} else {
467
- env->pc = tb->pc;
468
+ env->pc = tb_pc(tb);
469
}
470
}
471
472
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
473
index XXXXXXX..XXXXXXX 100644
474
--- a/target/rx/cpu.c
475
+++ b/target/rx/cpu.c
476
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_synchronize_from_tb(CPUState *cs,
477
{
478
RXCPU *cpu = RX_CPU(cs);
479
480
- cpu->env.pc = tb->pc;
481
+ cpu->env.pc = tb_pc(tb);
482
}
483
484
static bool rx_cpu_has_work(CPUState *cs)
485
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
486
index XXXXXXX..XXXXXXX 100644
487
--- a/target/sh4/cpu.c
488
+++ b/target/sh4/cpu.c
489
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_synchronize_from_tb(CPUState *cs,
490
{
491
SuperHCPU *cpu = SUPERH_CPU(cs);
492
493
- cpu->env.pc = tb->pc;
494
+ cpu->env.pc = tb_pc(tb);
495
cpu->env.flags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
496
}
497
498
@@ -XXX,XX +XXX,XX @@ static bool superh_io_recompile_replay_branch(CPUState *cs,
499
CPUSH4State *env = &cpu->env;
500
501
if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
502
- && env->pc != tb->pc) {
503
+ && env->pc != tb_pc(tb)) {
504
env->pc -= 2;
505
env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
506
return true;
507
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
508
index XXXXXXX..XXXXXXX 100644
509
--- a/target/sparc/cpu.c
510
+++ b/target/sparc/cpu.c
511
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_synchronize_from_tb(CPUState *cs,
512
{
513
SPARCCPU *cpu = SPARC_CPU(cs);
514
515
- cpu->env.pc = tb->pc;
516
+ cpu->env.pc = tb_pc(tb);
517
cpu->env.npc = tb->cs_base;
518
}
519
520
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
521
index XXXXXXX..XXXXXXX 100644
522
--- a/target/tricore/cpu.c
523
+++ b/target/tricore/cpu.c
524
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_synchronize_from_tb(CPUState *cs,
525
TriCoreCPU *cpu = TRICORE_CPU(cs);
526
CPUTriCoreState *env = &cpu->env;
527
528
- env->PC = tb->pc;
529
+ env->PC = tb_pc(tb);
530
}
531
532
static void tricore_cpu_reset(DeviceState *dev)
533
diff --git a/tcg/tcg.c b/tcg/tcg.c
186
diff --git a/tcg/tcg.c b/tcg/tcg.c
534
index XXXXXXX..XXXXXXX 100644
187
index XXXXXXX..XXXXXXX 100644
535
--- a/tcg/tcg.c
188
--- a/tcg/tcg.c
536
+++ b/tcg/tcg.c
189
+++ b/tcg/tcg.c
537
@@ -XXX,XX +XXX,XX @@ int64_t tcg_cpu_exec_time(void)
190
@@ -XXX,XX +XXX,XX @@
538
#endif
191
#define NO_CPU_IO_DEFS
539
192
540
193
#include "exec/exec-all.h"
541
-int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
194
+#include "exec/tlb-common.h"
542
+int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
195
#include "tcg/tcg-op.h"
543
{
196
544
#ifdef CONFIG_PROFILER
197
#if UINTPTR_MAX == UINT32_MAX
545
TCGProfile *prof = &s->prof;
198
@@ -XXX,XX +XXX,XX @@ static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which)
546
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
199
return (uintptr_t)tcg_splitwx_to_rx(&s->gen_tb->jmp_target_addr[which]);
547
200
}
548
#ifdef DEBUG_DISAS
201
549
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
202
+#if defined(CONFIG_SOFTMMU) && !defined(CONFIG_TCG_INTERPRETER)
550
- && qemu_log_in_addr_range(tb->pc))) {
203
+static int tlb_mask_table_ofs(TCGContext *s, int which)
551
+ && qemu_log_in_addr_range(pc_start))) {
204
+{
552
FILE *logfile = qemu_log_trylock();
205
+ return s->tlb_fast_offset + which * sizeof(CPUTLBDescFast);
553
if (logfile) {
206
+}
554
fprintf(logfile, "OP:\n");
207
+#endif
555
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
208
+
556
if (s->nb_indirects > 0) {
209
/* Signal overflow, starting over with fewer guest insns. */
557
#ifdef DEBUG_DISAS
210
static G_NORETURN
558
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
211
void tcg_raise_tb_overflow(TCGContext *s)
559
- && qemu_log_in_addr_range(tb->pc))) {
212
@@ -XXX,XX +XXX,XX @@ void tcg_func_start(TCGContext *s)
560
+ && qemu_log_in_addr_range(pc_start))) {
213
561
FILE *logfile = qemu_log_trylock();
214
tcg_debug_assert(s->addr_type == TCG_TYPE_I32 ||
562
if (logfile) {
215
s->addr_type == TCG_TYPE_I64);
563
fprintf(logfile, "OP before indirect lowering:\n");
216
+
564
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
217
+#if defined(CONFIG_SOFTMMU) && !defined(CONFIG_TCG_INTERPRETER)
565
218
+ tcg_debug_assert(s->tlb_fast_offset < 0);
566
#ifdef DEBUG_DISAS
219
+ tcg_debug_assert(s->tlb_fast_offset >= MIN_TLB_MASK_TABLE_OFS);
567
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
220
+#endif
568
- && qemu_log_in_addr_range(tb->pc))) {
221
}
569
+ && qemu_log_in_addr_range(pc_start))) {
222
570
FILE *logfile = qemu_log_trylock();
223
static TCGTemp *tcg_temp_alloc(TCGContext *s)
571
if (logfile) {
224
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
572
fprintf(logfile, "OP after optimization and liveness analysis:\n");
225
index XXXXXXX..XXXXXXX 100644
226
--- a/tcg/aarch64/tcg-target.c.inc
227
+++ b/tcg/aarch64/tcg-target.c.inc
228
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
229
return true;
230
}
231
232
+/* We expect to use a 7-bit scaled negative offset from ENV. */
233
+#define MIN_TLB_MASK_TABLE_OFS -512
234
+
235
/*
236
* For softmmu, perform the TLB load and compare.
237
* For useronly, perform any required alignment tests.
238
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
239
? TCG_TYPE_I64 : TCG_TYPE_I32);
240
241
/* Load env_tlb(env)->f[mmu_idx].{mask,table} into {tmp0,tmp1}. */
242
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
243
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -512);
244
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
245
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8);
246
tcg_out_insn(s, 3314, LDP, TCG_REG_TMP0, TCG_REG_TMP1, TCG_AREG0,
247
- TLB_MASK_TABLE_OFS(mem_index), 1, 0);
248
+ tlb_mask_table_ofs(s, mem_index), 1, 0);
249
250
/* Extract the TLB index from the address into X0. */
251
tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64,
252
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
253
index XXXXXXX..XXXXXXX 100644
254
--- a/tcg/arm/tcg-target.c.inc
255
+++ b/tcg/arm/tcg-target.c.inc
256
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
257
return true;
258
}
259
260
+/* We expect to use an 9-bit sign-magnitude negative offset from ENV. */
261
+#define MIN_TLB_MASK_TABLE_OFS -256
262
+
263
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
264
TCGReg addrlo, TCGReg addrhi,
265
MemOpIdx oi, bool is_ld)
266
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
267
int mem_index = get_mmuidx(oi);
268
int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
269
: offsetof(CPUTLBEntry, addr_write);
270
- int fast_off = TLB_MASK_TABLE_OFS(mem_index);
271
+ int fast_off = tlb_mask_table_ofs(s, mem_index);
272
unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
273
TCGReg t_addr;
274
275
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
276
ldst->addrhi_reg = addrhi;
277
278
/* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */
279
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
280
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256);
281
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
282
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);
283
tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);
284
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
285
index XXXXXXX..XXXXXXX 100644
286
--- a/tcg/i386/tcg-target.c.inc
287
+++ b/tcg/i386/tcg-target.c.inc
288
@@ -XXX,XX +XXX,XX @@ static inline int setup_guest_base_seg(void)
289
#endif /* setup_guest_base_seg */
290
#endif /* !SOFTMMU */
291
292
+#define MIN_TLB_MASK_TABLE_OFS INT_MIN
293
+
294
/*
295
* For softmmu, perform the TLB load and compare.
296
* For useronly, perform any required alignment tests.
297
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
298
int trexw = 0, hrexw = 0, tlbrexw = 0;
299
unsigned mem_index = get_mmuidx(oi);
300
unsigned s_mask = (1 << s_bits) - 1;
301
+ int fast_ofs = tlb_mask_table_ofs(s, mem_index);
302
int tlb_mask;
303
304
ldst = new_ldst_label(s);
305
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
306
s->page_bits - CPU_TLB_ENTRY_BITS);
307
308
tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, TCG_REG_L0, TCG_AREG0,
309
- TLB_MASK_TABLE_OFS(mem_index) +
310
- offsetof(CPUTLBDescFast, mask));
311
+ fast_ofs + offsetof(CPUTLBDescFast, mask));
312
313
tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, TCG_REG_L0, TCG_AREG0,
314
- TLB_MASK_TABLE_OFS(mem_index) +
315
- offsetof(CPUTLBDescFast, table));
316
+ fast_ofs + offsetof(CPUTLBDescFast, table));
317
318
/*
319
* If the required alignment is at least as large as the access, simply
320
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
321
index XXXXXXX..XXXXXXX 100644
322
--- a/tcg/loongarch64/tcg-target.c.inc
323
+++ b/tcg/loongarch64/tcg-target.c.inc
324
@@ -XXX,XX +XXX,XX @@ bool tcg_target_has_memory_bswap(MemOp memop)
325
return false;
326
}
327
328
+/* We expect to use a 12-bit negative offset from ENV. */
329
+#define MIN_TLB_MASK_TABLE_OFS -(1 << 11)
330
+
331
/*
332
* For softmmu, perform the TLB load and compare.
333
* For useronly, perform any required alignment tests.
334
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
335
#ifdef CONFIG_SOFTMMU
336
unsigned s_bits = opc & MO_SIZE;
337
int mem_index = get_mmuidx(oi);
338
- int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
339
+ int fast_ofs = tlb_mask_table_ofs(s, mem_index);
340
int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
341
int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
342
343
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
344
ldst->oi = oi;
345
ldst->addrlo_reg = addr_reg;
346
347
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
348
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
349
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
350
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
351
352
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
353
index XXXXXXX..XXXXXXX 100644
354
--- a/tcg/mips/tcg-target.c.inc
355
+++ b/tcg/mips/tcg-target.c.inc
356
@@ -XXX,XX +XXX,XX @@ bool tcg_target_has_memory_bswap(MemOp memop)
357
return false;
358
}
359
360
+/* We expect to use a 16-bit negative offset from ENV. */
361
+#define MIN_TLB_MASK_TABLE_OFS -32768
362
+
363
/*
364
* For softmmu, perform the TLB load and compare.
365
* For useronly, perform any required alignment tests.
366
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
367
#ifdef CONFIG_SOFTMMU
368
unsigned s_mask = (1 << s_bits) - 1;
369
int mem_index = get_mmuidx(oi);
370
- int fast_off = TLB_MASK_TABLE_OFS(mem_index);
371
+ int fast_off = tlb_mask_table_ofs(s, mem_index);
372
int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
373
int table_off = fast_off + offsetof(CPUTLBDescFast, table);
374
int add_off = offsetof(CPUTLBEntry, addend);
375
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
376
ldst->addrhi_reg = addrhi;
377
378
/* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
379
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
380
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
381
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off);
382
tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off);
383
384
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
385
index XXXXXXX..XXXXXXX 100644
386
--- a/tcg/ppc/tcg-target.c.inc
387
+++ b/tcg/ppc/tcg-target.c.inc
388
@@ -XXX,XX +XXX,XX @@ bool tcg_target_has_memory_bswap(MemOp memop)
389
return aa.atom <= MO_64;
390
}
391
392
+/* We expect to use a 16-bit negative offset from ENV. */
393
+#define MIN_TLB_MASK_TABLE_OFS -32768
394
+
395
/*
396
* For softmmu, perform the TLB load and compare.
397
* For useronly, perform any required alignment tests.
398
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
399
int mem_index = get_mmuidx(oi);
400
int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
401
: offsetof(CPUTLBEntry, addr_write);
402
- int fast_off = TLB_MASK_TABLE_OFS(mem_index);
403
+ int fast_off = tlb_mask_table_ofs(s, mem_index);
404
int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
405
int table_off = fast_off + offsetof(CPUTLBDescFast, table);
406
407
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
408
ldst->addrhi_reg = addrhi;
409
410
/* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
411
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
412
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
413
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
414
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off);
415
416
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
417
index XXXXXXX..XXXXXXX 100644
418
--- a/tcg/riscv/tcg-target.c.inc
419
+++ b/tcg/riscv/tcg-target.c.inc
420
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
421
return true;
422
}
423
424
+/* We expect to use a 12-bit negative offset from ENV. */
425
+#define MIN_TLB_MASK_TABLE_OFS -(1 << 11)
426
+
427
/*
428
* For softmmu, perform the TLB load and compare.
429
* For useronly, perform any required alignment tests.
430
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
431
unsigned s_bits = opc & MO_SIZE;
432
unsigned s_mask = (1u << s_bits) - 1;
433
int mem_index = get_mmuidx(oi);
434
- int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
435
+ int fast_ofs = tlb_mask_table_ofs(s, mem_index);
436
int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
437
int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
438
int compare_mask;
439
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
440
ldst->oi = oi;
441
ldst->addrlo_reg = addr_reg;
442
443
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
444
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
445
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
446
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
447
448
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
449
index XXXXXXX..XXXXXXX 100644
450
--- a/tcg/s390x/tcg-target.c.inc
451
+++ b/tcg/s390x/tcg-target.c.inc
452
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
453
return true;
454
}
455
456
+/* We're expecting to use a 20-bit negative offset on the tlb memory ops. */
457
+#define MIN_TLB_MASK_TABLE_OFS -(1 << 19)
458
+
459
/*
460
* For softmmu, perform the TLB load and compare.
461
* For useronly, perform any required alignment tests.
462
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
463
#ifdef CONFIG_SOFTMMU
464
unsigned s_mask = (1 << s_bits) - 1;
465
int mem_index = get_mmuidx(oi);
466
- int fast_off = TLB_MASK_TABLE_OFS(mem_index);
467
+ int fast_off = tlb_mask_table_ofs(s, mem_index);
468
int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
469
int table_off = fast_off + offsetof(CPUTLBDescFast, table);
470
int ofs, a_off;
471
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
472
tcg_out_sh64(s, RSY_SRLG, TCG_TMP0, addr_reg, TCG_REG_NONE,
473
s->page_bits - CPU_TLB_ENTRY_BITS);
474
475
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
476
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19));
477
tcg_out_insn(s, RXY, NG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, mask_off);
478
tcg_out_insn(s, RXY, AG, TCG_TMP0, TCG_AREG0, TCG_REG_NONE, table_off);
479
480
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
481
index XXXXXXX..XXXXXXX 100644
482
--- a/tcg/sparc64/tcg-target.c.inc
483
+++ b/tcg/sparc64/tcg-target.c.inc
484
@@ -XXX,XX +XXX,XX @@ bool tcg_target_has_memory_bswap(MemOp memop)
485
return true;
486
}
487
488
+/* We expect to use a 13-bit negative offset from ENV. */
489
+#define MIN_TLB_MASK_TABLE_OFS -(1 << 12)
490
+
491
/*
492
* For softmmu, perform the TLB load and compare.
493
* For useronly, perform any required alignment tests.
494
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
495
496
#ifdef CONFIG_SOFTMMU
497
int mem_index = get_mmuidx(oi);
498
- int fast_off = TLB_MASK_TABLE_OFS(mem_index);
499
+ int fast_off = tlb_mask_table_ofs(s, mem_index);
500
int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
501
int table_off = fast_off + offsetof(CPUTLBDescFast, table);
502
int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
503
@@ -XXX,XX +XXX,XX @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
504
int cc;
505
506
/* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
507
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
508
- QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));
509
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T2, TCG_AREG0, mask_off);
510
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_T3, TCG_AREG0, table_off);
511
573
--
512
--
574
2.34.1
513
2.34.1
575
514
576
515
diff view generated by jsdifflib
New patch
1
This had been pulled in from tcg/tcg.h, via exec/cpu_ldst.h,
2
via exec/exec-all.h, but the include of tcg.h will be removed.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/avr/helper.c | 1 +
8
1 file changed, 1 insertion(+)
9
10
diff --git a/target/avr/helper.c b/target/avr/helper.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/avr/helper.c
13
+++ b/target/avr/helper.c
14
@@ -XXX,XX +XXX,XX @@
15
16
#include "qemu/osdep.h"
17
#include "qemu/log.h"
18
+#include "qemu/error-report.h"
19
#include "cpu.h"
20
#include "hw/core/tcg-cpu-ops.h"
21
#include "exec/exec-all.h"
22
--
23
2.34.1
24
25
diff view generated by jsdifflib
1
Populate this new method for all targets. Always match
1
This had been pulled in from tcg/tcg.h, via exec/cpu_ldst.h,
2
the result that would be given by cpu_get_tb_cpu_state,
2
via exec/exec-all.h, but the include of tcg.h will be removed.
3
as we will want these values to correspond in the logs.
4
3
5
Reviewed-by: Taylor Simpson <tsimpson@quicinc.com>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> (target/sparc)
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
6
---
10
Cc: Eduardo Habkost <eduardo@habkost.net> (supporter:Machine core)
7
target/avr/cpu.c | 1 +
11
Cc: Marcel Apfelbaum <marcel.apfelbaum@gmail.com> (supporter:Machine core)
8
target/rx/cpu.c | 1 +
12
Cc: "Philippe Mathieu-Daudé" <f4bug@amsat.org> (reviewer:Machine core)
9
target/rx/op_helper.c | 1 +
13
Cc: Yanan Wang <wangyanan55@huawei.com> (reviewer:Machine core)
10
target/tricore/cpu.c | 1 +
14
Cc: Michael Rolnik <mrolnik@gmail.com> (maintainer:AVR TCG CPUs)
11
4 files changed, 4 insertions(+)
15
Cc: "Edgar E. Iglesias" <edgar.iglesias@gmail.com> (maintainer:CRIS TCG CPUs)
16
Cc: Taylor Simpson <tsimpson@quicinc.com> (supporter:Hexagon TCG CPUs)
17
Cc: Song Gao <gaosong@loongson.cn> (maintainer:LoongArch TCG CPUs)
18
Cc: Xiaojuan Yang <yangxiaojuan@loongson.cn> (maintainer:LoongArch TCG CPUs)
19
Cc: Laurent Vivier <laurent@vivier.eu> (maintainer:M68K TCG CPUs)
20
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com> (reviewer:MIPS TCG CPUs)
21
Cc: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> (reviewer:MIPS TCG CPUs)
22
Cc: Chris Wulff <crwulff@gmail.com> (maintainer:NiosII TCG CPUs)
23
Cc: Marek Vasut <marex@denx.de> (maintainer:NiosII TCG CPUs)
24
Cc: Stafford Horne <shorne@gmail.com> (odd fixer:OpenRISC TCG CPUs)
25
Cc: Yoshinori Sato <ysato@users.sourceforge.jp> (reviewer:RENESAS RX CPUs)
26
Cc: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> (maintainer:SPARC TCG CPUs)
27
Cc: Bastian Koppelmann <kbastian@mail.uni-paderborn.de> (maintainer:TriCore TCG CPUs)
28
Cc: Max Filippov <jcmvbkbc@gmail.com> (maintainer:Xtensa TCG CPUs)
29
Cc: qemu-arm@nongnu.org (open list:ARM TCG CPUs)
30
Cc: qemu-ppc@nongnu.org (open list:PowerPC TCG CPUs)
31
Cc: qemu-riscv@nongnu.org (open list:RISC-V TCG CPUs)
32
Cc: qemu-s390x@nongnu.org (open list:S390 TCG CPUs)
33
---
34
include/hw/core/cpu.h | 3 +++
35
target/alpha/cpu.c | 9 +++++++++
36
target/arm/cpu.c | 13 +++++++++++++
37
target/avr/cpu.c | 8 ++++++++
38
target/cris/cpu.c | 8 ++++++++
39
target/hexagon/cpu.c | 8 ++++++++
40
target/hppa/cpu.c | 8 ++++++++
41
target/i386/cpu.c | 9 +++++++++
42
target/loongarch/cpu.c | 9 +++++++++
43
target/m68k/cpu.c | 8 ++++++++
44
target/microblaze/cpu.c | 8 ++++++++
45
target/mips/cpu.c | 8 ++++++++
46
target/nios2/cpu.c | 9 +++++++++
47
target/openrisc/cpu.c | 8 ++++++++
48
target/ppc/cpu_init.c | 8 ++++++++
49
target/riscv/cpu.c | 13 +++++++++++++
50
target/rx/cpu.c | 8 ++++++++
51
target/s390x/cpu.c | 8 ++++++++
52
target/sh4/cpu.c | 8 ++++++++
53
target/sparc/cpu.c | 8 ++++++++
54
target/tricore/cpu.c | 9 +++++++++
55
target/xtensa/cpu.c | 8 ++++++++
56
22 files changed, 186 insertions(+)
57
12
58
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
59
index XXXXXXX..XXXXXXX 100644
60
--- a/include/hw/core/cpu.h
61
+++ b/include/hw/core/cpu.h
62
@@ -XXX,XX +XXX,XX @@ struct SysemuCPUOps;
63
* If the target behaviour here is anything other than "set
64
* the PC register to the value passed in" then the target must
65
* also implement the synchronize_from_tb hook.
66
+ * @get_pc: Callback for getting the Program Counter register.
67
+ * As above, with the semantics of the target architecture.
68
* @gdb_read_register: Callback for letting GDB read a register.
69
* @gdb_write_register: Callback for letting GDB write a register.
70
* @gdb_adjust_breakpoint: Callback for adjusting the address of a
71
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
72
void (*dump_state)(CPUState *cpu, FILE *, int flags);
73
int64_t (*get_arch_id)(CPUState *cpu);
74
void (*set_pc)(CPUState *cpu, vaddr value);
75
+ vaddr (*get_pc)(CPUState *cpu);
76
int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
77
int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
78
vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr);
79
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
80
index XXXXXXX..XXXXXXX 100644
81
--- a/target/alpha/cpu.c
82
+++ b/target/alpha/cpu.c
83
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_set_pc(CPUState *cs, vaddr value)
84
cpu->env.pc = value;
85
}
86
87
+static vaddr alpha_cpu_get_pc(CPUState *cs)
88
+{
89
+ AlphaCPU *cpu = ALPHA_CPU(cs);
90
+
91
+ return cpu->env.pc;
92
+}
93
+
94
+
95
static bool alpha_cpu_has_work(CPUState *cs)
96
{
97
/* Here we are checking to see if the CPU should wake up from HALT.
98
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
99
cc->has_work = alpha_cpu_has_work;
100
cc->dump_state = alpha_cpu_dump_state;
101
cc->set_pc = alpha_cpu_set_pc;
102
+ cc->get_pc = alpha_cpu_get_pc;
103
cc->gdb_read_register = alpha_cpu_gdb_read_register;
104
cc->gdb_write_register = alpha_cpu_gdb_write_register;
105
#ifndef CONFIG_USER_ONLY
106
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
107
index XXXXXXX..XXXXXXX 100644
108
--- a/target/arm/cpu.c
109
+++ b/target/arm/cpu.c
110
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_set_pc(CPUState *cs, vaddr value)
111
}
112
}
113
114
+static vaddr arm_cpu_get_pc(CPUState *cs)
115
+{
116
+ ARMCPU *cpu = ARM_CPU(cs);
117
+ CPUARMState *env = &cpu->env;
118
+
119
+ if (is_a64(env)) {
120
+ return env->pc;
121
+ } else {
122
+ return env->regs[15];
123
+ }
124
+}
125
+
126
#ifdef CONFIG_TCG
127
void arm_cpu_synchronize_from_tb(CPUState *cs,
128
const TranslationBlock *tb)
129
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
130
cc->has_work = arm_cpu_has_work;
131
cc->dump_state = arm_cpu_dump_state;
132
cc->set_pc = arm_cpu_set_pc;
133
+ cc->get_pc = arm_cpu_get_pc;
134
cc->gdb_read_register = arm_cpu_gdb_read_register;
135
cc->gdb_write_register = arm_cpu_gdb_write_register;
136
#ifndef CONFIG_USER_ONLY
137
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
13
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
138
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
139
--- a/target/avr/cpu.c
15
--- a/target/avr/cpu.c
140
+++ b/target/avr/cpu.c
16
+++ b/target/avr/cpu.c
141
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_set_pc(CPUState *cs, vaddr value)
17
@@ -XXX,XX +XXX,XX @@
142
cpu->env.pc_w = value / 2; /* internally PC points to words */
18
#include "exec/exec-all.h"
143
}
19
#include "cpu.h"
144
20
#include "disas/dis-asm.h"
145
+static vaddr avr_cpu_get_pc(CPUState *cs)
21
+#include "tcg/debug-assert.h"
146
+{
22
147
+ AVRCPU *cpu = AVR_CPU(cs);
23
static void avr_cpu_set_pc(CPUState *cs, vaddr value)
148
+
149
+ return cpu->env.pc_w * 2;
150
+}
151
+
152
static bool avr_cpu_has_work(CPUState *cs)
153
{
24
{
154
AVRCPU *cpu = AVR_CPU(cs);
155
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
156
cc->has_work = avr_cpu_has_work;
157
cc->dump_state = avr_cpu_dump_state;
158
cc->set_pc = avr_cpu_set_pc;
159
+ cc->get_pc = avr_cpu_get_pc;
160
dc->vmsd = &vms_avr_cpu;
161
cc->sysemu_ops = &avr_sysemu_ops;
162
cc->disas_set_info = avr_cpu_disas_set_info;
163
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
164
index XXXXXXX..XXXXXXX 100644
165
--- a/target/cris/cpu.c
166
+++ b/target/cris/cpu.c
167
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_set_pc(CPUState *cs, vaddr value)
168
cpu->env.pc = value;
169
}
170
171
+static vaddr cris_cpu_get_pc(CPUState *cs)
172
+{
173
+ CRISCPU *cpu = CRIS_CPU(cs);
174
+
175
+ return cpu->env.pc;
176
+}
177
+
178
static bool cris_cpu_has_work(CPUState *cs)
179
{
180
return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
181
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
182
cc->has_work = cris_cpu_has_work;
183
cc->dump_state = cris_cpu_dump_state;
184
cc->set_pc = cris_cpu_set_pc;
185
+ cc->get_pc = cris_cpu_get_pc;
186
cc->gdb_read_register = cris_cpu_gdb_read_register;
187
cc->gdb_write_register = cris_cpu_gdb_write_register;
188
#ifndef CONFIG_USER_ONLY
189
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
190
index XXXXXXX..XXXXXXX 100644
191
--- a/target/hexagon/cpu.c
192
+++ b/target/hexagon/cpu.c
193
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_set_pc(CPUState *cs, vaddr value)
194
env->gpr[HEX_REG_PC] = value;
195
}
196
197
+static vaddr hexagon_cpu_get_pc(CPUState *cs)
198
+{
199
+ HexagonCPU *cpu = HEXAGON_CPU(cs);
200
+ CPUHexagonState *env = &cpu->env;
201
+ return env->gpr[HEX_REG_PC];
202
+}
203
+
204
static void hexagon_cpu_synchronize_from_tb(CPUState *cs,
205
const TranslationBlock *tb)
206
{
207
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_class_init(ObjectClass *c, void *data)
208
cc->has_work = hexagon_cpu_has_work;
209
cc->dump_state = hexagon_dump_state;
210
cc->set_pc = hexagon_cpu_set_pc;
211
+ cc->get_pc = hexagon_cpu_get_pc;
212
cc->gdb_read_register = hexagon_gdb_read_register;
213
cc->gdb_write_register = hexagon_gdb_write_register;
214
cc->gdb_num_core_regs = TOTAL_PER_THREAD_REGS + NUM_VREGS + NUM_QREGS;
215
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
216
index XXXXXXX..XXXXXXX 100644
217
--- a/target/hppa/cpu.c
218
+++ b/target/hppa/cpu.c
219
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_set_pc(CPUState *cs, vaddr value)
220
cpu->env.iaoq_b = value + 4;
221
}
222
223
+static vaddr hppa_cpu_get_pc(CPUState *cs)
224
+{
225
+ HPPACPU *cpu = HPPA_CPU(cs);
226
+
227
+ return cpu->env.iaoq_f;
228
+}
229
+
230
static void hppa_cpu_synchronize_from_tb(CPUState *cs,
231
const TranslationBlock *tb)
232
{
233
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
234
cc->has_work = hppa_cpu_has_work;
235
cc->dump_state = hppa_cpu_dump_state;
236
cc->set_pc = hppa_cpu_set_pc;
237
+ cc->get_pc = hppa_cpu_get_pc;
238
cc->gdb_read_register = hppa_cpu_gdb_read_register;
239
cc->gdb_write_register = hppa_cpu_gdb_write_register;
240
#ifndef CONFIG_USER_ONLY
241
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
242
index XXXXXXX..XXXXXXX 100644
243
--- a/target/i386/cpu.c
244
+++ b/target/i386/cpu.c
245
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_set_pc(CPUState *cs, vaddr value)
246
cpu->env.eip = value;
247
}
248
249
+static vaddr x86_cpu_get_pc(CPUState *cs)
250
+{
251
+ X86CPU *cpu = X86_CPU(cs);
252
+
253
+ /* Match cpu_get_tb_cpu_state. */
254
+ return cpu->env.eip + cpu->env.segs[R_CS].base;
255
+}
256
+
257
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
258
{
259
X86CPU *cpu = X86_CPU(cs);
260
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
261
cc->has_work = x86_cpu_has_work;
262
cc->dump_state = x86_cpu_dump_state;
263
cc->set_pc = x86_cpu_set_pc;
264
+ cc->get_pc = x86_cpu_get_pc;
265
cc->gdb_read_register = x86_cpu_gdb_read_register;
266
cc->gdb_write_register = x86_cpu_gdb_write_register;
267
cc->get_arch_id = x86_cpu_get_arch_id;
268
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
269
index XXXXXXX..XXXXXXX 100644
270
--- a/target/loongarch/cpu.c
271
+++ b/target/loongarch/cpu.c
272
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
273
env->pc = value;
274
}
275
276
+static vaddr loongarch_cpu_get_pc(CPUState *cs)
277
+{
278
+ LoongArchCPU *cpu = LOONGARCH_CPU(cs);
279
+ CPULoongArchState *env = &cpu->env;
280
+
281
+ return env->pc;
282
+}
283
+
284
#ifndef CONFIG_USER_ONLY
285
#include "hw/loongarch/virt.h"
286
287
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_class_init(ObjectClass *c, void *data)
288
cc->has_work = loongarch_cpu_has_work;
289
cc->dump_state = loongarch_cpu_dump_state;
290
cc->set_pc = loongarch_cpu_set_pc;
291
+ cc->get_pc = loongarch_cpu_get_pc;
292
#ifndef CONFIG_USER_ONLY
293
dc->vmsd = &vmstate_loongarch_cpu;
294
cc->sysemu_ops = &loongarch_sysemu_ops;
295
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
296
index XXXXXXX..XXXXXXX 100644
297
--- a/target/m68k/cpu.c
298
+++ b/target/m68k/cpu.c
299
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_set_pc(CPUState *cs, vaddr value)
300
cpu->env.pc = value;
301
}
302
303
+static vaddr m68k_cpu_get_pc(CPUState *cs)
304
+{
305
+ M68kCPU *cpu = M68K_CPU(cs);
306
+
307
+ return cpu->env.pc;
308
+}
309
+
310
static bool m68k_cpu_has_work(CPUState *cs)
311
{
312
return cs->interrupt_request & CPU_INTERRUPT_HARD;
313
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
314
cc->has_work = m68k_cpu_has_work;
315
cc->dump_state = m68k_cpu_dump_state;
316
cc->set_pc = m68k_cpu_set_pc;
317
+ cc->get_pc = m68k_cpu_get_pc;
318
cc->gdb_read_register = m68k_cpu_gdb_read_register;
319
cc->gdb_write_register = m68k_cpu_gdb_write_register;
320
#if defined(CONFIG_SOFTMMU)
321
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
322
index XXXXXXX..XXXXXXX 100644
323
--- a/target/microblaze/cpu.c
324
+++ b/target/microblaze/cpu.c
325
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_set_pc(CPUState *cs, vaddr value)
326
cpu->env.iflags = 0;
327
}
328
329
+static vaddr mb_cpu_get_pc(CPUState *cs)
330
+{
331
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
332
+
333
+ return cpu->env.pc;
334
+}
335
+
336
static void mb_cpu_synchronize_from_tb(CPUState *cs,
337
const TranslationBlock *tb)
338
{
339
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
340
341
cc->dump_state = mb_cpu_dump_state;
342
cc->set_pc = mb_cpu_set_pc;
343
+ cc->get_pc = mb_cpu_get_pc;
344
cc->gdb_read_register = mb_cpu_gdb_read_register;
345
cc->gdb_write_register = mb_cpu_gdb_write_register;
346
347
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
348
index XXXXXXX..XXXXXXX 100644
349
--- a/target/mips/cpu.c
350
+++ b/target/mips/cpu.c
351
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_set_pc(CPUState *cs, vaddr value)
352
mips_env_set_pc(&cpu->env, value);
353
}
354
355
+static vaddr mips_cpu_get_pc(CPUState *cs)
356
+{
357
+ MIPSCPU *cpu = MIPS_CPU(cs);
358
+
359
+ return cpu->env.active_tc.PC;
360
+}
361
+
362
static bool mips_cpu_has_work(CPUState *cs)
363
{
364
MIPSCPU *cpu = MIPS_CPU(cs);
365
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
366
cc->has_work = mips_cpu_has_work;
367
cc->dump_state = mips_cpu_dump_state;
368
cc->set_pc = mips_cpu_set_pc;
369
+ cc->get_pc = mips_cpu_get_pc;
370
cc->gdb_read_register = mips_cpu_gdb_read_register;
371
cc->gdb_write_register = mips_cpu_gdb_write_register;
372
#ifndef CONFIG_USER_ONLY
373
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
374
index XXXXXXX..XXXXXXX 100644
375
--- a/target/nios2/cpu.c
376
+++ b/target/nios2/cpu.c
377
@@ -XXX,XX +XXX,XX @@ static void nios2_cpu_set_pc(CPUState *cs, vaddr value)
378
env->pc = value;
379
}
380
381
+static vaddr nios2_cpu_get_pc(CPUState *cs)
382
+{
383
+ Nios2CPU *cpu = NIOS2_CPU(cs);
384
+ CPUNios2State *env = &cpu->env;
385
+
386
+ return env->pc;
387
+}
388
+
389
static bool nios2_cpu_has_work(CPUState *cs)
390
{
391
return cs->interrupt_request & CPU_INTERRUPT_HARD;
392
@@ -XXX,XX +XXX,XX @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)
393
cc->has_work = nios2_cpu_has_work;
394
cc->dump_state = nios2_cpu_dump_state;
395
cc->set_pc = nios2_cpu_set_pc;
396
+ cc->get_pc = nios2_cpu_get_pc;
397
cc->disas_set_info = nios2_cpu_disas_set_info;
398
#ifndef CONFIG_USER_ONLY
399
cc->sysemu_ops = &nios2_sysemu_ops;
400
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
401
index XXXXXXX..XXXXXXX 100644
402
--- a/target/openrisc/cpu.c
403
+++ b/target/openrisc/cpu.c
404
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_set_pc(CPUState *cs, vaddr value)
405
cpu->env.dflag = 0;
406
}
407
408
+static vaddr openrisc_cpu_get_pc(CPUState *cs)
409
+{
410
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
411
+
412
+ return cpu->env.pc;
413
+}
414
+
415
static void openrisc_cpu_synchronize_from_tb(CPUState *cs,
416
const TranslationBlock *tb)
417
{
418
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
419
cc->has_work = openrisc_cpu_has_work;
420
cc->dump_state = openrisc_cpu_dump_state;
421
cc->set_pc = openrisc_cpu_set_pc;
422
+ cc->get_pc = openrisc_cpu_get_pc;
423
cc->gdb_read_register = openrisc_cpu_gdb_read_register;
424
cc->gdb_write_register = openrisc_cpu_gdb_write_register;
425
#ifndef CONFIG_USER_ONLY
426
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
427
index XXXXXXX..XXXXXXX 100644
428
--- a/target/ppc/cpu_init.c
429
+++ b/target/ppc/cpu_init.c
430
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_set_pc(CPUState *cs, vaddr value)
431
cpu->env.nip = value;
432
}
433
434
+static vaddr ppc_cpu_get_pc(CPUState *cs)
435
+{
436
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
437
+
438
+ return cpu->env.nip;
439
+}
440
+
441
static bool ppc_cpu_has_work(CPUState *cs)
442
{
443
PowerPCCPU *cpu = POWERPC_CPU(cs);
444
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
445
cc->has_work = ppc_cpu_has_work;
446
cc->dump_state = ppc_cpu_dump_state;
447
cc->set_pc = ppc_cpu_set_pc;
448
+ cc->get_pc = ppc_cpu_get_pc;
449
cc->gdb_read_register = ppc_cpu_gdb_read_register;
450
cc->gdb_write_register = ppc_cpu_gdb_write_register;
451
#ifndef CONFIG_USER_ONLY
452
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
453
index XXXXXXX..XXXXXXX 100644
454
--- a/target/riscv/cpu.c
455
+++ b/target/riscv/cpu.c
456
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
457
}
458
}
459
460
+static vaddr riscv_cpu_get_pc(CPUState *cs)
461
+{
462
+ RISCVCPU *cpu = RISCV_CPU(cs);
463
+ CPURISCVState *env = &cpu->env;
464
+
465
+ /* Match cpu_get_tb_cpu_state. */
466
+ if (env->xl == MXL_RV32) {
467
+ return env->pc & UINT32_MAX;
468
+ }
469
+ return env->pc;
470
+}
471
+
472
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
473
const TranslationBlock *tb)
474
{
475
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
476
cc->has_work = riscv_cpu_has_work;
477
cc->dump_state = riscv_cpu_dump_state;
478
cc->set_pc = riscv_cpu_set_pc;
479
+ cc->get_pc = riscv_cpu_get_pc;
480
cc->gdb_read_register = riscv_cpu_gdb_read_register;
481
cc->gdb_write_register = riscv_cpu_gdb_write_register;
482
cc->gdb_num_core_regs = 33;
483
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
25
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
484
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
485
--- a/target/rx/cpu.c
27
--- a/target/rx/cpu.c
486
+++ b/target/rx/cpu.c
28
+++ b/target/rx/cpu.c
487
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_set_pc(CPUState *cs, vaddr value)
29
@@ -XXX,XX +XXX,XX @@
488
cpu->env.pc = value;
30
#include "exec/exec-all.h"
489
}
31
#include "hw/loader.h"
490
32
#include "fpu/softfloat.h"
491
+static vaddr rx_cpu_get_pc(CPUState *cs)
33
+#include "tcg/debug-assert.h"
492
+{
34
493
+ RXCPU *cpu = RX_CPU(cs);
35
static void rx_cpu_set_pc(CPUState *cs, vaddr value)
494
+
495
+ return cpu->env.pc;
496
+}
497
+
498
static void rx_cpu_synchronize_from_tb(CPUState *cs,
499
const TranslationBlock *tb)
500
{
36
{
501
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)
37
diff --git a/target/rx/op_helper.c b/target/rx/op_helper.c
502
cc->has_work = rx_cpu_has_work;
503
cc->dump_state = rx_cpu_dump_state;
504
cc->set_pc = rx_cpu_set_pc;
505
+ cc->get_pc = rx_cpu_get_pc;
506
507
#ifndef CONFIG_USER_ONLY
508
cc->sysemu_ops = &rx_sysemu_ops;
509
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
510
index XXXXXXX..XXXXXXX 100644
38
index XXXXXXX..XXXXXXX 100644
511
--- a/target/s390x/cpu.c
39
--- a/target/rx/op_helper.c
512
+++ b/target/s390x/cpu.c
40
+++ b/target/rx/op_helper.c
513
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_set_pc(CPUState *cs, vaddr value)
41
@@ -XXX,XX +XXX,XX @@
514
cpu->env.psw.addr = value;
42
#include "exec/helper-proto.h"
515
}
43
#include "exec/cpu_ldst.h"
516
44
#include "fpu/softfloat.h"
517
+static vaddr s390_cpu_get_pc(CPUState *cs)
45
+#include "tcg/debug-assert.h"
518
+{
46
519
+ S390CPU *cpu = S390_CPU(cs);
47
static inline G_NORETURN
520
+
48
void raise_exception(CPURXState *env, int index,
521
+ return cpu->env.psw.addr;
522
+}
523
+
524
static bool s390_cpu_has_work(CPUState *cs)
525
{
526
S390CPU *cpu = S390_CPU(cs);
527
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
528
cc->has_work = s390_cpu_has_work;
529
cc->dump_state = s390_cpu_dump_state;
530
cc->set_pc = s390_cpu_set_pc;
531
+ cc->get_pc = s390_cpu_get_pc;
532
cc->gdb_read_register = s390_cpu_gdb_read_register;
533
cc->gdb_write_register = s390_cpu_gdb_write_register;
534
#ifndef CONFIG_USER_ONLY
535
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
536
index XXXXXXX..XXXXXXX 100644
537
--- a/target/sh4/cpu.c
538
+++ b/target/sh4/cpu.c
539
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_set_pc(CPUState *cs, vaddr value)
540
cpu->env.pc = value;
541
}
542
543
+static vaddr superh_cpu_get_pc(CPUState *cs)
544
+{
545
+ SuperHCPU *cpu = SUPERH_CPU(cs);
546
+
547
+ return cpu->env.pc;
548
+}
549
+
550
static void superh_cpu_synchronize_from_tb(CPUState *cs,
551
const TranslationBlock *tb)
552
{
553
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
554
cc->has_work = superh_cpu_has_work;
555
cc->dump_state = superh_cpu_dump_state;
556
cc->set_pc = superh_cpu_set_pc;
557
+ cc->get_pc = superh_cpu_get_pc;
558
cc->gdb_read_register = superh_cpu_gdb_read_register;
559
cc->gdb_write_register = superh_cpu_gdb_write_register;
560
#ifndef CONFIG_USER_ONLY
561
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
562
index XXXXXXX..XXXXXXX 100644
563
--- a/target/sparc/cpu.c
564
+++ b/target/sparc/cpu.c
565
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_set_pc(CPUState *cs, vaddr value)
566
cpu->env.npc = value + 4;
567
}
568
569
+static vaddr sparc_cpu_get_pc(CPUState *cs)
570
+{
571
+ SPARCCPU *cpu = SPARC_CPU(cs);
572
+
573
+ return cpu->env.pc;
574
+}
575
+
576
static void sparc_cpu_synchronize_from_tb(CPUState *cs,
577
const TranslationBlock *tb)
578
{
579
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
580
cc->memory_rw_debug = sparc_cpu_memory_rw_debug;
581
#endif
582
cc->set_pc = sparc_cpu_set_pc;
583
+ cc->get_pc = sparc_cpu_get_pc;
584
cc->gdb_read_register = sparc_cpu_gdb_read_register;
585
cc->gdb_write_register = sparc_cpu_gdb_write_register;
586
#ifndef CONFIG_USER_ONLY
587
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
49
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
588
index XXXXXXX..XXXXXXX 100644
50
index XXXXXXX..XXXXXXX 100644
589
--- a/target/tricore/cpu.c
51
--- a/target/tricore/cpu.c
590
+++ b/target/tricore/cpu.c
52
+++ b/target/tricore/cpu.c
591
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_set_pc(CPUState *cs, vaddr value)
53
@@ -XXX,XX +XXX,XX @@
592
env->PC = value & ~(target_ulong)1;
54
#include "cpu.h"
593
}
55
#include "exec/exec-all.h"
594
56
#include "qemu/error-report.h"
595
+static vaddr tricore_cpu_get_pc(CPUState *cs)
57
+#include "tcg/debug-assert.h"
596
+{
58
597
+ TriCoreCPU *cpu = TRICORE_CPU(cs);
59
static inline void set_feature(CPUTriCoreState *env, int feature)
598
+ CPUTriCoreState *env = &cpu->env;
599
+
600
+ return env->PC;
601
+}
602
+
603
static void tricore_cpu_synchronize_from_tb(CPUState *cs,
604
const TranslationBlock *tb)
605
{
60
{
606
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)
607
608
cc->dump_state = tricore_cpu_dump_state;
609
cc->set_pc = tricore_cpu_set_pc;
610
+ cc->get_pc = tricore_cpu_get_pc;
611
cc->sysemu_ops = &tricore_sysemu_ops;
612
cc->tcg_ops = &tricore_tcg_ops;
613
}
614
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
615
index XXXXXXX..XXXXXXX 100644
616
--- a/target/xtensa/cpu.c
617
+++ b/target/xtensa/cpu.c
618
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_set_pc(CPUState *cs, vaddr value)
619
cpu->env.pc = value;
620
}
621
622
+static vaddr xtensa_cpu_get_pc(CPUState *cs)
623
+{
624
+ XtensaCPU *cpu = XTENSA_CPU(cs);
625
+
626
+ return cpu->env.pc;
627
+}
628
+
629
static bool xtensa_cpu_has_work(CPUState *cs)
630
{
631
#ifndef CONFIG_USER_ONLY
632
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
633
cc->has_work = xtensa_cpu_has_work;
634
cc->dump_state = xtensa_cpu_dump_state;
635
cc->set_pc = xtensa_cpu_set_pc;
636
+ cc->get_pc = xtensa_cpu_get_pc;
637
cc->gdb_read_register = xtensa_cpu_gdb_read_register;
638
cc->gdb_write_register = xtensa_cpu_gdb_write_register;
639
cc->gdb_stop_before_watchpoint = true;
640
--
61
--
641
2.34.1
62
2.34.1
642
63
643
64
diff view generated by jsdifflib
New patch
1
This had been pulled in from exec/cpu_ldst.h, via exec/exec-all.h,
2
but the include of tcg.h will be removed.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
accel/tcg/monitor.c | 1 +
8
accel/tcg/tcg-accel-ops-mttcg.c | 2 +-
9
accel/tcg/tcg-accel-ops-rr.c | 2 +-
10
target/i386/helper.c | 3 +++
11
target/openrisc/sys_helper.c | 1 +
12
5 files changed, 7 insertions(+), 2 deletions(-)
13
14
diff --git a/accel/tcg/monitor.c b/accel/tcg/monitor.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/accel/tcg/monitor.c
17
+++ b/accel/tcg/monitor.c
18
@@ -XXX,XX +XXX,XX @@
19
#include "sysemu/cpus.h"
20
#include "sysemu/cpu-timers.h"
21
#include "sysemu/tcg.h"
22
+#include "tcg/tcg.h"
23
#include "internal.h"
24
25
26
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/accel/tcg/tcg-accel-ops-mttcg.c
29
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
30
@@ -XXX,XX +XXX,XX @@
31
#include "qemu/guest-random.h"
32
#include "exec/exec-all.h"
33
#include "hw/boards.h"
34
-
35
+#include "tcg/tcg.h"
36
#include "tcg-accel-ops.h"
37
#include "tcg-accel-ops-mttcg.h"
38
39
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/accel/tcg/tcg-accel-ops-rr.c
42
+++ b/accel/tcg/tcg-accel-ops-rr.c
43
@@ -XXX,XX +XXX,XX @@
44
#include "qemu/notify.h"
45
#include "qemu/guest-random.h"
46
#include "exec/exec-all.h"
47
-
48
+#include "tcg/tcg.h"
49
#include "tcg-accel-ops.h"
50
#include "tcg-accel-ops-rr.h"
51
#include "tcg-accel-ops-icount.h"
52
diff --git a/target/i386/helper.c b/target/i386/helper.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/target/i386/helper.c
55
+++ b/target/i386/helper.c
56
@@ -XXX,XX +XXX,XX @@
57
#include "monitor/monitor.h"
58
#endif
59
#include "qemu/log.h"
60
+#ifdef CONFIG_TCG
61
+#include "tcg/tcg.h"
62
+#endif
63
64
void cpu_sync_avx_hflag(CPUX86State *env)
65
{
66
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
67
index XXXXXXX..XXXXXXX 100644
68
--- a/target/openrisc/sys_helper.c
69
+++ b/target/openrisc/sys_helper.c
70
@@ -XXX,XX +XXX,XX @@
71
#ifndef CONFIG_USER_ONLY
72
#include "hw/boards.h"
73
#endif
74
+#include "tcg/tcg.h"
75
76
#define TO_SPR(group, number) (((group) << 11) + (number))
77
78
--
79
2.34.1
80
81
diff view generated by jsdifflib
New patch
1
Often, the only thing we need to know about the TCG host
2
is the register size.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/tcg/tcg.h | 12 +-----------
8
tcg/aarch64/tcg-target-reg-bits.h | 12 ++++++++++++
9
tcg/arm/tcg-target-reg-bits.h | 12 ++++++++++++
10
tcg/i386/tcg-target-reg-bits.h | 16 ++++++++++++++++
11
tcg/i386/tcg-target.h | 2 --
12
tcg/loongarch64/tcg-target-reg-bits.h | 21 +++++++++++++++++++++
13
tcg/loongarch64/tcg-target.h | 11 -----------
14
tcg/mips/tcg-target-reg-bits.h | 18 ++++++++++++++++++
15
tcg/mips/tcg-target.h | 8 --------
16
tcg/ppc/tcg-target-reg-bits.h | 16 ++++++++++++++++
17
tcg/ppc/tcg-target.h | 5 -----
18
tcg/riscv/tcg-target-reg-bits.h | 19 +++++++++++++++++++
19
tcg/riscv/tcg-target.h | 9 ---------
20
tcg/s390x/tcg-target-reg-bits.h | 17 +++++++++++++++++
21
tcg/sparc64/tcg-target-reg-bits.h | 12 ++++++++++++
22
tcg/tci/tcg-target-reg-bits.h | 18 ++++++++++++++++++
23
tcg/tci/tcg-target.h | 8 --------
24
tcg/s390x/tcg-target.c.inc | 5 -----
25
18 files changed, 162 insertions(+), 59 deletions(-)
26
create mode 100644 tcg/aarch64/tcg-target-reg-bits.h
27
create mode 100644 tcg/arm/tcg-target-reg-bits.h
28
create mode 100644 tcg/i386/tcg-target-reg-bits.h
29
create mode 100644 tcg/loongarch64/tcg-target-reg-bits.h
30
create mode 100644 tcg/mips/tcg-target-reg-bits.h
31
create mode 100644 tcg/ppc/tcg-target-reg-bits.h
32
create mode 100644 tcg/riscv/tcg-target-reg-bits.h
33
create mode 100644 tcg/s390x/tcg-target-reg-bits.h
34
create mode 100644 tcg/sparc64/tcg-target-reg-bits.h
35
create mode 100644 tcg/tci/tcg-target-reg-bits.h
36
37
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
38
index XXXXXXX..XXXXXXX 100644
39
--- a/include/tcg/tcg.h
40
+++ b/include/tcg/tcg.h
41
@@ -XXX,XX +XXX,XX @@
42
#include "qemu/plugin.h"
43
#include "qemu/queue.h"
44
#include "tcg/tcg-mo.h"
45
+#include "tcg-target-reg-bits.h"
46
#include "tcg-target.h"
47
#include "tcg/tcg-cond.h"
48
#include "tcg/debug-assert.h"
49
@@ -XXX,XX +XXX,XX @@
50
#define CPU_TEMP_BUF_NLONGS 128
51
#define TCG_STATIC_FRAME_SIZE (CPU_TEMP_BUF_NLONGS * sizeof(long))
52
53
-/* Default target word size to pointer size. */
54
-#ifndef TCG_TARGET_REG_BITS
55
-# if UINTPTR_MAX == UINT32_MAX
56
-# define TCG_TARGET_REG_BITS 32
57
-# elif UINTPTR_MAX == UINT64_MAX
58
-# define TCG_TARGET_REG_BITS 64
59
-# else
60
-# error Unknown pointer size for tcg target
61
-# endif
62
-#endif
63
-
64
#if TCG_TARGET_REG_BITS == 32
65
typedef int32_t tcg_target_long;
66
typedef uint32_t tcg_target_ulong;
67
diff --git a/tcg/aarch64/tcg-target-reg-bits.h b/tcg/aarch64/tcg-target-reg-bits.h
68
new file mode 100644
69
index XXXXXXX..XXXXXXX
70
--- /dev/null
71
+++ b/tcg/aarch64/tcg-target-reg-bits.h
72
@@ -XXX,XX +XXX,XX @@
73
+/* SPDX-License-Identifier: GPL-2.0-or-later */
74
+/*
75
+ * Define target-specific register size
76
+ * Copyright (c) 2023 Linaro
77
+ */
78
+
79
+#ifndef TCG_TARGET_REG_BITS_H
80
+#define TCG_TARGET_REG_BITS_H
81
+
82
+#define TCG_TARGET_REG_BITS 64
83
+
84
+#endif
85
diff --git a/tcg/arm/tcg-target-reg-bits.h b/tcg/arm/tcg-target-reg-bits.h
86
new file mode 100644
87
index XXXXXXX..XXXXXXX
88
--- /dev/null
89
+++ b/tcg/arm/tcg-target-reg-bits.h
90
@@ -XXX,XX +XXX,XX @@
91
+/* SPDX-License-Identifier: MIT */
92
+/*
93
+ * Define target-specific register size
94
+ * Copyright (c) 2023 Linaro
95
+ */
96
+
97
+#ifndef TCG_TARGET_REG_BITS_H
98
+#define TCG_TARGET_REG_BITS_H
99
+
100
+#define TCG_TARGET_REG_BITS 32
101
+
102
+#endif
103
diff --git a/tcg/i386/tcg-target-reg-bits.h b/tcg/i386/tcg-target-reg-bits.h
104
new file mode 100644
105
index XXXXXXX..XXXXXXX
106
--- /dev/null
107
+++ b/tcg/i386/tcg-target-reg-bits.h
108
@@ -XXX,XX +XXX,XX @@
109
+/* SPDX-License-Identifier: MIT */
110
+/*
111
+ * Define target-specific register size
112
+ * Copyright (c) 2008 Fabrice Bellard
113
+ */
114
+
115
+#ifndef TCG_TARGET_REG_BITS_H
116
+#define TCG_TARGET_REG_BITS_H
117
+
118
+#ifdef __x86_64__
119
+# define TCG_TARGET_REG_BITS 64
120
+#else
121
+# define TCG_TARGET_REG_BITS 32
122
+#endif
123
+
124
+#endif
125
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
126
index XXXXXXX..XXXXXXX 100644
127
--- a/tcg/i386/tcg-target.h
128
+++ b/tcg/i386/tcg-target.h
129
@@ -XXX,XX +XXX,XX @@
130
#define TCG_TARGET_INSN_UNIT_SIZE 1
131
132
#ifdef __x86_64__
133
-# define TCG_TARGET_REG_BITS 64
134
# define TCG_TARGET_NB_REGS 32
135
# define MAX_CODE_GEN_BUFFER_SIZE (2 * GiB)
136
#else
137
-# define TCG_TARGET_REG_BITS 32
138
# define TCG_TARGET_NB_REGS 24
139
# define MAX_CODE_GEN_BUFFER_SIZE UINT32_MAX
140
#endif
141
diff --git a/tcg/loongarch64/tcg-target-reg-bits.h b/tcg/loongarch64/tcg-target-reg-bits.h
142
new file mode 100644
143
index XXXXXXX..XXXXXXX
144
--- /dev/null
145
+++ b/tcg/loongarch64/tcg-target-reg-bits.h
146
@@ -XXX,XX +XXX,XX @@
147
+/* SPDX-License-Identifier: MIT */
148
+/*
149
+ * Define target-specific register size
150
+ * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
151
+ */
152
+
153
+#ifndef TCG_TARGET_REG_BITS_H
154
+#define TCG_TARGET_REG_BITS_H
155
+
156
+/*
157
+ * Loongson removed the (incomplete) 32-bit support from kernel and toolchain
158
+ * for the initial upstreaming of this architecture, so don't bother and just
159
+ * support the LP64* ABI for now.
160
+ */
161
+#if defined(__loongarch64)
162
+# define TCG_TARGET_REG_BITS 64
163
+#else
164
+# error unsupported LoongArch register size
165
+#endif
166
+
167
+#endif
168
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
169
index XXXXXXX..XXXXXXX 100644
170
--- a/tcg/loongarch64/tcg-target.h
171
+++ b/tcg/loongarch64/tcg-target.h
172
@@ -XXX,XX +XXX,XX @@
173
#ifndef LOONGARCH_TCG_TARGET_H
174
#define LOONGARCH_TCG_TARGET_H
175
176
-/*
177
- * Loongson removed the (incomplete) 32-bit support from kernel and toolchain
178
- * for the initial upstreaming of this architecture, so don't bother and just
179
- * support the LP64* ABI for now.
180
- */
181
-#if defined(__loongarch64)
182
-# define TCG_TARGET_REG_BITS 64
183
-#else
184
-# error unsupported LoongArch register size
185
-#endif
186
-
187
#define TCG_TARGET_INSN_UNIT_SIZE 4
188
#define TCG_TARGET_NB_REGS 32
189
190
diff --git a/tcg/mips/tcg-target-reg-bits.h b/tcg/mips/tcg-target-reg-bits.h
191
new file mode 100644
192
index XXXXXXX..XXXXXXX
193
--- /dev/null
194
+++ b/tcg/mips/tcg-target-reg-bits.h
195
@@ -XXX,XX +XXX,XX @@
196
+/* SPDX-License-Identifier: MIT */
197
+/*
198
+ * Define target-specific register size
199
+ * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
200
+ */
201
+
202
+#ifndef TCG_TARGET_REG_BITS_H
203
+#define TCG_TARGET_REG_BITS_H
204
+
205
+#if _MIPS_SIM == _ABIO32
206
+# define TCG_TARGET_REG_BITS 32
207
+#elif _MIPS_SIM == _ABIN32 || _MIPS_SIM == _ABI64
208
+# define TCG_TARGET_REG_BITS 64
209
+#else
210
+# error "Unknown ABI"
211
+#endif
212
+
213
+#endif
214
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
215
index XXXXXXX..XXXXXXX 100644
216
--- a/tcg/mips/tcg-target.h
217
+++ b/tcg/mips/tcg-target.h
218
@@ -XXX,XX +XXX,XX @@
219
#ifndef MIPS_TCG_TARGET_H
220
#define MIPS_TCG_TARGET_H
221
222
-#if _MIPS_SIM == _ABIO32
223
-# define TCG_TARGET_REG_BITS 32
224
-#elif _MIPS_SIM == _ABIN32 || _MIPS_SIM == _ABI64
225
-# define TCG_TARGET_REG_BITS 64
226
-#else
227
-# error "Unknown ABI"
228
-#endif
229
-
230
#define TCG_TARGET_INSN_UNIT_SIZE 4
231
#define TCG_TARGET_NB_REGS 32
232
233
diff --git a/tcg/ppc/tcg-target-reg-bits.h b/tcg/ppc/tcg-target-reg-bits.h
234
new file mode 100644
235
index XXXXXXX..XXXXXXX
236
--- /dev/null
237
+++ b/tcg/ppc/tcg-target-reg-bits.h
238
@@ -XXX,XX +XXX,XX @@
239
+/* SPDX-License-Identifier: MIT */
240
+/*
241
+ * Define target-specific register size
242
+ * Copyright (c) 2008 Fabrice Bellard
243
+ */
244
+
245
+#ifndef TCG_TARGET_REG_BITS_H
246
+#define TCG_TARGET_REG_BITS_H
247
+
248
+#ifdef _ARCH_PPC64
249
+# define TCG_TARGET_REG_BITS 64
250
+#else
251
+# define TCG_TARGET_REG_BITS 32
252
+#endif
253
+
254
+#endif
255
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
256
index XXXXXXX..XXXXXXX 100644
257
--- a/tcg/ppc/tcg-target.h
258
+++ b/tcg/ppc/tcg-target.h
259
@@ -XXX,XX +XXX,XX @@
260
#ifndef PPC_TCG_TARGET_H
261
#define PPC_TCG_TARGET_H
262
263
-#ifdef _ARCH_PPC64
264
-# define TCG_TARGET_REG_BITS 64
265
-#else
266
-# define TCG_TARGET_REG_BITS 32
267
-#endif
268
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
269
270
#define TCG_TARGET_NB_REGS 64
271
diff --git a/tcg/riscv/tcg-target-reg-bits.h b/tcg/riscv/tcg-target-reg-bits.h
272
new file mode 100644
273
index XXXXXXX..XXXXXXX
274
--- /dev/null
275
+++ b/tcg/riscv/tcg-target-reg-bits.h
276
@@ -XXX,XX +XXX,XX @@
277
+/* SPDX-License-Identifier: MIT */
278
+/*
279
+ * Define target-specific register size
280
+ * Copyright (c) 2018 SiFive, Inc
281
+ */
282
+
283
+#ifndef TCG_TARGET_REG_BITS_H
284
+#define TCG_TARGET_REG_BITS_H
285
+
286
+/*
287
+ * We don't support oversize guests.
288
+ * Since we will only build tcg once, this in turn requires a 64-bit host.
289
+ */
290
+#if __riscv_xlen != 64
291
+#error "unsupported code generation mode"
292
+#endif
293
+#define TCG_TARGET_REG_BITS 64
294
+
295
+#endif
296
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
297
index XXXXXXX..XXXXXXX 100644
298
--- a/tcg/riscv/tcg-target.h
299
+++ b/tcg/riscv/tcg-target.h
300
@@ -XXX,XX +XXX,XX @@
301
#ifndef RISCV_TCG_TARGET_H
302
#define RISCV_TCG_TARGET_H
303
304
-/*
305
- * We don't support oversize guests.
306
- * Since we will only build tcg once, this in turn requires a 64-bit host.
307
- */
308
-#if __riscv_xlen != 64
309
-#error "unsupported code generation mode"
310
-#endif
311
-#define TCG_TARGET_REG_BITS 64
312
-
313
#define TCG_TARGET_INSN_UNIT_SIZE 4
314
#define TCG_TARGET_NB_REGS 32
315
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
316
diff --git a/tcg/s390x/tcg-target-reg-bits.h b/tcg/s390x/tcg-target-reg-bits.h
317
new file mode 100644
318
index XXXXXXX..XXXXXXX
319
--- /dev/null
320
+++ b/tcg/s390x/tcg-target-reg-bits.h
321
@@ -XXX,XX +XXX,XX @@
322
+/* SPDX-License-Identifier: MIT */
323
+/*
324
+ * Define target-specific register size
325
+ * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
326
+ */
327
+
328
+#ifndef TCG_TARGET_REG_BITS_H
329
+#define TCG_TARGET_REG_BITS_H
330
+
331
+/* We only support generating code for 64-bit mode. */
332
+#if UINTPTR_MAX == UINT64_MAX
333
+# define TCG_TARGET_REG_BITS 64
334
+#else
335
+# error "unsupported code generation mode"
336
+#endif
337
+
338
+#endif
339
diff --git a/tcg/sparc64/tcg-target-reg-bits.h b/tcg/sparc64/tcg-target-reg-bits.h
340
new file mode 100644
341
index XXXXXXX..XXXXXXX
342
--- /dev/null
343
+++ b/tcg/sparc64/tcg-target-reg-bits.h
344
@@ -XXX,XX +XXX,XX @@
345
+/* SPDX-License-Identifier: MIT */
346
+/*
347
+ * Define target-specific register size
348
+ * Copyright (c) 2023 Linaro
349
+ */
350
+
351
+#ifndef TCG_TARGET_REG_BITS_H
352
+#define TCG_TARGET_REG_BITS_H
353
+
354
+#define TCG_TARGET_REG_BITS 64
355
+
356
+#endif
357
diff --git a/tcg/tci/tcg-target-reg-bits.h b/tcg/tci/tcg-target-reg-bits.h
358
new file mode 100644
359
index XXXXXXX..XXXXXXX
360
--- /dev/null
361
+++ b/tcg/tci/tcg-target-reg-bits.h
362
@@ -XXX,XX +XXX,XX @@
363
+/* SPDX-License-Identifier: MIT */
364
+/*
365
+ * Define target-specific register size
366
+ * Copyright (c) 2009, 2011 Stefan Weil
367
+ */
368
+
369
+#ifndef TCG_TARGET_REG_BITS_H
370
+#define TCG_TARGET_REG_BITS_H
371
+
372
+#if UINTPTR_MAX == UINT32_MAX
373
+# define TCG_TARGET_REG_BITS 32
374
+#elif UINTPTR_MAX == UINT64_MAX
375
+# define TCG_TARGET_REG_BITS 64
376
+#else
377
+# error Unknown pointer size for tci target
378
+#endif
379
+
380
+#endif
381
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
382
index XXXXXXX..XXXXXXX 100644
383
--- a/tcg/tci/tcg-target.h
384
+++ b/tcg/tci/tcg-target.h
385
@@ -XXX,XX +XXX,XX @@
386
#define TCG_TARGET_INSN_UNIT_SIZE 4
387
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
388
389
-#if UINTPTR_MAX == UINT32_MAX
390
-# define TCG_TARGET_REG_BITS 32
391
-#elif UINTPTR_MAX == UINT64_MAX
392
-# define TCG_TARGET_REG_BITS 64
393
-#else
394
-# error Unknown pointer size for tci target
395
-#endif
396
-
397
/* Optional instructions. */
398
399
#define TCG_TARGET_HAS_bswap16_i32 1
400
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
401
index XXXXXXX..XXXXXXX 100644
402
--- a/tcg/s390x/tcg-target.c.inc
403
+++ b/tcg/s390x/tcg-target.c.inc
404
@@ -XXX,XX +XXX,XX @@
405
* THE SOFTWARE.
406
*/
407
408
-/* We only support generating code for 64-bit mode. */
409
-#if TCG_TARGET_REG_BITS != 64
410
-#error "unsupported code generation mode"
411
-#endif
412
-
413
#include "../tcg-ldst.c.inc"
414
#include "../tcg-pool.c.inc"
415
#include "elf.h"
416
--
417
2.34.1
418
419
diff view generated by jsdifflib
New patch
1
The symbol is always defined, even if to 0. We wanted to test for
2
TCG_OVERSIZED_GUEST == 0.
1
3
4
This fixed, the #error is reached while building arm-softmmu, because
5
TCG_OVERSIZED_GUEST is not true (nor supposed to be true) for arm32
6
guest on a 32-bit host. But that's ok, because this feature doesn't
7
apply to arm32. Add an #ifdef for TARGET_AARCH64.
8
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
12
target/arm/ptw.c | 7 ++++++-
13
1 file changed, 6 insertions(+), 1 deletion(-)
14
15
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/arm/ptw.c
18
+++ b/target/arm/ptw.c
19
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
20
uint64_t new_val, S1Translate *ptw,
21
ARMMMUFaultInfo *fi)
22
{
23
+#ifdef TARGET_AARCH64
24
uint64_t cur_val;
25
void *host = ptw->out_host;
26
27
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
28
* we know that TCG_OVERSIZED_GUEST is set, which means that we are
29
* running in round-robin mode and could only race with dma i/o.
30
*/
31
-#ifndef TCG_OVERSIZED_GUEST
32
+#if !TCG_OVERSIZED_GUEST
33
# error "Unexpected configuration"
34
#endif
35
bool locked = qemu_mutex_iothread_locked();
36
@@ -XXX,XX +XXX,XX @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
37
#endif
38
39
return cur_val;
40
+#else
41
+ /* AArch32 does not have FEAT_HADFS. */
42
+ g_assert_not_reached();
43
+#endif
44
}
45
46
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
47
--
48
2.34.1
49
50
diff view generated by jsdifflib
1
When PAGE_WRITE_INV is set when calling tlb_set_page,
1
Move a use of TARGET_LONG_BITS out of tcg/tcg.h.
2
we immediately set TLB_INVALID_MASK in order to force
2
Include the new file only where required.
3
tlb_fill to be called on the next lookup. Here in
4
probe_access_internal, we have just called tlb_fill
5
and eliminated true misses, thus the lookup must be valid.
6
3
7
This allows us to remove a warning comment from s390x.
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
There doesn't seem to be a reason to change the code though.
9
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
Reviewed-by: David Hildenbrand <david@redhat.com>
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
6
---
15
accel/tcg/cputlb.c | 10 +++++++++-
7
include/exec/cpu_ldst.h | 3 +--
16
target/s390x/tcg/mem_helper.c | 4 ----
8
include/tcg/oversized-guest.h | 23 +++++++++++++++++++++++
17
2 files changed, 9 insertions(+), 5 deletions(-)
9
include/tcg/tcg.h | 9 ---------
10
accel/tcg/cputlb.c | 1 +
11
accel/tcg/tcg-all.c | 1 +
12
target/arm/ptw.c | 1 +
13
target/riscv/cpu_helper.c | 1 +
14
7 files changed, 28 insertions(+), 11 deletions(-)
15
create mode 100644 include/tcg/oversized-guest.h
18
16
17
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/include/exec/cpu_ldst.h
20
+++ b/include/exec/cpu_ldst.h
21
@@ -XXX,XX +XXX,XX @@ static inline void clear_helper_retaddr(void)
22
23
#else
24
25
-/* Needed for TCG_OVERSIZED_GUEST */
26
-#include "tcg/tcg.h"
27
+#include "tcg/oversized-guest.h"
28
29
static inline target_ulong tlb_read_idx(const CPUTLBEntry *entry,
30
MMUAccessType access_type)
31
diff --git a/include/tcg/oversized-guest.h b/include/tcg/oversized-guest.h
32
new file mode 100644
33
index XXXXXXX..XXXXXXX
34
--- /dev/null
35
+++ b/include/tcg/oversized-guest.h
36
@@ -XXX,XX +XXX,XX @@
37
+/* SPDX-License-Identifier: MIT */
38
+/*
39
+ * Define TCG_OVERSIZED_GUEST
40
+ * Copyright (c) 2008 Fabrice Bellard
41
+ */
42
+
43
+#ifndef EXEC_TCG_OVERSIZED_GUEST_H
44
+#define EXEC_TCG_OVERSIZED_GUEST_H
45
+
46
+#include "tcg-target-reg-bits.h"
47
+#include "cpu-param.h"
48
+
49
+/*
50
+ * Oversized TCG guests make things like MTTCG hard
51
+ * as we can't use atomics for cputlb updates.
52
+ */
53
+#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
54
+#define TCG_OVERSIZED_GUEST 1
55
+#else
56
+#define TCG_OVERSIZED_GUEST 0
57
+#endif
58
+
59
+#endif
60
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
61
index XXXXXXX..XXXXXXX 100644
62
--- a/include/tcg/tcg.h
63
+++ b/include/tcg/tcg.h
64
@@ -XXX,XX +XXX,XX @@ typedef uint64_t tcg_target_ulong;
65
#error unsupported
66
#endif
67
68
-/* Oversized TCG guests make things like MTTCG hard
69
- * as we can't use atomics for cputlb updates.
70
- */
71
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
72
-#define TCG_OVERSIZED_GUEST 1
73
-#else
74
-#define TCG_OVERSIZED_GUEST 0
75
-#endif
76
-
77
#if TCG_TARGET_NB_REGS <= 32
78
typedef uint32_t TCGRegSet;
79
#elif TCG_TARGET_NB_REGS <= 64
19
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
80
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
20
index XXXXXXX..XXXXXXX 100644
81
index XXXXXXX..XXXXXXX 100644
21
--- a/accel/tcg/cputlb.c
82
--- a/accel/tcg/cputlb.c
22
+++ b/accel/tcg/cputlb.c
83
+++ b/accel/tcg/cputlb.c
23
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
84
@@ -XXX,XX +XXX,XX @@
24
}
85
#include "qemu/plugin-memory.h"
25
tlb_addr = tlb_read_ofs(entry, elt_ofs);
86
#endif
26
87
#include "tcg/tcg-ldst.h"
27
+ flags = TLB_FLAGS_MASK;
88
+#include "tcg/oversized-guest.h"
28
page_addr = addr & TARGET_PAGE_MASK;
89
#include "exec/helper-proto.h"
29
if (!tlb_hit_page(tlb_addr, page_addr)) {
90
30
if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
91
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
31
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
92
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
32
33
/* TLB resize via tlb_fill may have moved the entry. */
34
entry = tlb_entry(env, mmu_idx, addr);
35
+
36
+ /*
37
+ * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
38
+ * to force the next access through tlb_fill. We've just
39
+ * called tlb_fill, so we know that this entry *is* valid.
40
+ */
41
+ flags &= ~TLB_INVALID_MASK;
42
}
43
tlb_addr = tlb_read_ofs(entry, elt_ofs);
44
}
45
- flags = tlb_addr & TLB_FLAGS_MASK;
46
+ flags &= tlb_addr;
47
48
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
49
if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
50
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
51
index XXXXXXX..XXXXXXX 100644
93
index XXXXXXX..XXXXXXX 100644
52
--- a/target/s390x/tcg/mem_helper.c
94
--- a/accel/tcg/tcg-all.c
53
+++ b/target/s390x/tcg/mem_helper.c
95
+++ b/accel/tcg/tcg-all.c
54
@@ -XXX,XX +XXX,XX @@ static int s390_probe_access(CPUArchState *env, target_ulong addr, int size,
96
@@ -XXX,XX +XXX,XX @@
55
#else
97
#include "exec/replay-core.h"
56
int flags;
98
#include "sysemu/cpu-timers.h"
57
99
#include "tcg/tcg.h"
58
- /*
100
+#include "tcg/oversized-guest.h"
59
- * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL
101
#include "qapi/error.h"
60
- * to detect if there was an exception during tlb_fill().
102
#include "qemu/error-report.h"
61
- */
103
#include "qemu/accel.h"
62
env->tlb_fill_exc = 0;
104
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
63
flags = probe_access_flags(env, addr, access_type, mmu_idx, nonfault, phost,
105
index XXXXXXX..XXXXXXX 100644
64
ra);
106
--- a/target/arm/ptw.c
107
+++ b/target/arm/ptw.c
108
@@ -XXX,XX +XXX,XX @@
109
#include "cpu.h"
110
#include "internals.h"
111
#include "idau.h"
112
+#include "tcg/oversized-guest.h"
113
114
115
typedef struct S1Translate {
116
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
117
index XXXXXXX..XXXXXXX 100644
118
--- a/target/riscv/cpu_helper.c
119
+++ b/target/riscv/cpu_helper.c
120
@@ -XXX,XX +XXX,XX @@
121
#include "sysemu/cpu-timers.h"
122
#include "cpu_bits.h"
123
#include "debug.h"
124
+#include "tcg/oversized-guest.h"
125
126
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
127
{
65
--
128
--
66
2.34.1
129
2.34.1
67
130
68
131
diff view generated by jsdifflib
New patch
1
These two items are the last uses of TARGET_LONG_BITS within tcg.h,
2
and are more in common with the other "_tl" definitions within that file.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/tcg/tcg-op.h | 15 ++++++++++++++-
8
include/tcg/tcg.h | 19 -------------------
9
target/mips/tcg/translate.h | 1 +
10
3 files changed, 15 insertions(+), 20 deletions(-)
11
12
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/include/tcg/tcg-op.h
15
+++ b/include/tcg/tcg-op.h
16
@@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_plugin_cb_end(void)
17
}
18
19
#if TARGET_LONG_BITS == 32
20
+typedef TCGv_i32 TCGv;
21
#define tcg_temp_new() tcg_temp_new_i32()
22
#define tcg_global_mem_new tcg_global_mem_new_i32
23
#define tcg_temp_free tcg_temp_free_i32
24
#define tcgv_tl_temp tcgv_i32_temp
25
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i32
26
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i32
27
-#else
28
+#elif TARGET_LONG_BITS == 64
29
+typedef TCGv_i64 TCGv;
30
#define tcg_temp_new() tcg_temp_new_i64()
31
#define tcg_global_mem_new tcg_global_mem_new_i64
32
#define tcg_temp_free tcg_temp_free_i64
33
#define tcgv_tl_temp tcgv_i64_temp
34
#define tcg_gen_qemu_ld_tl tcg_gen_qemu_ld_i64
35
#define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64
36
+#else
37
+#error Unhandled TARGET_LONG_BITS value
38
#endif
39
40
void tcg_gen_qemu_ld_i32_chk(TCGv_i32, TCGTemp *, TCGArg, MemOp, TCGType);
41
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
42
#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i64
43
#define tcg_gen_dup_tl_vec tcg_gen_dup_i64_vec
44
#define tcg_gen_dup_tl tcg_gen_dup_i64
45
+#define dup_const_tl dup_const
46
#else
47
#define tcg_gen_movi_tl tcg_gen_movi_i32
48
#define tcg_gen_mov_tl tcg_gen_mov_i32
49
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
50
#define tcg_gen_atomic_umax_fetch_tl tcg_gen_atomic_umax_fetch_i32
51
#define tcg_gen_dup_tl_vec tcg_gen_dup_i32_vec
52
#define tcg_gen_dup_tl tcg_gen_dup_i32
53
+
54
+#define dup_const_tl(VECE, C) \
55
+ (__builtin_constant_p(VECE) \
56
+ ? ( (VECE) == MO_8 ? 0x01010101ul * (uint8_t)(C) \
57
+ : (VECE) == MO_16 ? 0x00010001ul * (uint16_t)(C) \
58
+ : (VECE) == MO_32 ? 0x00000001ul * (uint32_t)(C) \
59
+ : (qemu_build_not_reached_always(), 0)) \
60
+ : (target_long)dup_const(VECE, C))
61
#endif
62
63
#if UINTPTR_MAX == UINT32_MAX
64
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
65
index XXXXXXX..XXXXXXX 100644
66
--- a/include/tcg/tcg.h
67
+++ b/include/tcg/tcg.h
68
@@ -XXX,XX +XXX,XX @@ typedef struct TCGv_i128_d *TCGv_i128;
69
typedef struct TCGv_ptr_d *TCGv_ptr;
70
typedef struct TCGv_vec_d *TCGv_vec;
71
typedef TCGv_ptr TCGv_env;
72
-#if TARGET_LONG_BITS == 32
73
-#define TCGv TCGv_i32
74
-#elif TARGET_LONG_BITS == 64
75
-#define TCGv TCGv_i64
76
-#else
77
-#error Unhandled TARGET_LONG_BITS value
78
-#endif
79
80
/* call flags */
81
/* Helper does not read globals (either directly or through an exception). It
82
@@ -XXX,XX +XXX,XX @@ uint64_t dup_const(unsigned vece, uint64_t c);
83
: (qemu_build_not_reached_always(), 0)) \
84
: dup_const(VECE, C))
85
86
-#if TARGET_LONG_BITS == 64
87
-# define dup_const_tl dup_const
88
-#else
89
-# define dup_const_tl(VECE, C) \
90
- (__builtin_constant_p(VECE) \
91
- ? ( (VECE) == MO_8 ? 0x01010101ul * (uint8_t)(C) \
92
- : (VECE) == MO_16 ? 0x00010001ul * (uint16_t)(C) \
93
- : (VECE) == MO_32 ? 0x00000001ul * (uint32_t)(C) \
94
- : (qemu_build_not_reached_always(), 0)) \
95
- : (target_long)dup_const(VECE, C))
96
-#endif
97
-
98
#ifdef CONFIG_DEBUG_TCG
99
void tcg_assert_listed_vecop(TCGOpcode);
100
#else
101
diff --git a/target/mips/tcg/translate.h b/target/mips/tcg/translate.h
102
index XXXXXXX..XXXXXXX 100644
103
--- a/target/mips/tcg/translate.h
104
+++ b/target/mips/tcg/translate.h
105
@@ -XXX,XX +XXX,XX @@
106
107
#include "qemu/log.h"
108
#include "exec/translator.h"
109
+#include "tcg/tcg-op.h"
110
111
#define MIPS_DEBUG_DISAS 0
112
113
--
114
2.34.1
115
116
diff view generated by jsdifflib
New patch
1
Create tcg/tcg-op-common.h, moving everything that does not concern
2
TARGET_LONG_BITS or TCGv. Adjust tcg/*.c to use the new header
3
instead of tcg-op.h, in preparation for compiling tcg/ only once.
1
4
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
include/tcg/tcg-op-common.h | 996 ++++++++++++++++++++++++++++++++++
9
include/tcg/tcg-op.h | 1004 +----------------------------------
10
tcg/optimize.c | 2 +-
11
tcg/tcg-op-gvec.c | 2 +-
12
tcg/tcg-op-ldst.c | 2 +-
13
tcg/tcg-op-vec.c | 2 +-
14
tcg/tcg-op.c | 2 +-
15
tcg/tcg.c | 2 +-
16
tcg/tci.c | 3 +-
17
9 files changed, 1007 insertions(+), 1008 deletions(-)
18
create mode 100644 include/tcg/tcg-op-common.h
19
20
diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h
21
new file mode 100644
22
index XXXXXXX..XXXXXXX
23
--- /dev/null
24
+++ b/include/tcg/tcg-op-common.h
25
@@ -XXX,XX +XXX,XX @@
26
+/* SPDX-License-Identifier: MIT */
27
+/*
28
+ * Target independent opcode generation functions.
29
+ *
30
+ * Copyright (c) 2008 Fabrice Bellard
31
+ */
32
+
33
+#ifndef TCG_TCG_OP_COMMON_H
34
+#define TCG_TCG_OP_COMMON_H
35
+
36
+#include "tcg/tcg.h"
37
+#include "exec/helper-proto.h"
38
+#include "exec/helper-gen.h"
39
+
40
+/* Basic output routines. Not for general consumption. */
41
+
42
+void tcg_gen_op1(TCGOpcode, TCGArg);
43
+void tcg_gen_op2(TCGOpcode, TCGArg, TCGArg);
44
+void tcg_gen_op3(TCGOpcode, TCGArg, TCGArg, TCGArg);
45
+void tcg_gen_op4(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg);
46
+void tcg_gen_op5(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg);
47
+void tcg_gen_op6(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg);
48
+
49
+void vec_gen_2(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg);
50
+void vec_gen_3(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg);
51
+void vec_gen_4(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg, TCGArg);
52
+
53
+static inline void tcg_gen_op1_i32(TCGOpcode opc, TCGv_i32 a1)
54
+{
55
+ tcg_gen_op1(opc, tcgv_i32_arg(a1));
56
+}
57
+
58
+static inline void tcg_gen_op1_i64(TCGOpcode opc, TCGv_i64 a1)
59
+{
60
+ tcg_gen_op1(opc, tcgv_i64_arg(a1));
61
+}
62
+
63
+static inline void tcg_gen_op1i(TCGOpcode opc, TCGArg a1)
64
+{
65
+ tcg_gen_op1(opc, a1);
66
+}
67
+
68
+static inline void tcg_gen_op2_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2)
69
+{
70
+ tcg_gen_op2(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2));
71
+}
72
+
73
+static inline void tcg_gen_op2_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2)
74
+{
75
+ tcg_gen_op2(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2));
76
+}
77
+
78
+static inline void tcg_gen_op2i_i32(TCGOpcode opc, TCGv_i32 a1, TCGArg a2)
79
+{
80
+ tcg_gen_op2(opc, tcgv_i32_arg(a1), a2);
81
+}
82
+
83
+static inline void tcg_gen_op2i_i64(TCGOpcode opc, TCGv_i64 a1, TCGArg a2)
84
+{
85
+ tcg_gen_op2(opc, tcgv_i64_arg(a1), a2);
86
+}
87
+
88
+static inline void tcg_gen_op2ii(TCGOpcode opc, TCGArg a1, TCGArg a2)
89
+{
90
+ tcg_gen_op2(opc, a1, a2);
91
+}
92
+
93
+static inline void tcg_gen_op3_i32(TCGOpcode opc, TCGv_i32 a1,
94
+ TCGv_i32 a2, TCGv_i32 a3)
95
+{
96
+ tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), tcgv_i32_arg(a3));
97
+}
98
+
99
+static inline void tcg_gen_op3_i64(TCGOpcode opc, TCGv_i64 a1,
100
+ TCGv_i64 a2, TCGv_i64 a3)
101
+{
102
+ tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), tcgv_i64_arg(a3));
103
+}
104
+
105
+static inline void tcg_gen_op3i_i32(TCGOpcode opc, TCGv_i32 a1,
106
+ TCGv_i32 a2, TCGArg a3)
107
+{
108
+ tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3);
109
+}
110
+
111
+static inline void tcg_gen_op3i_i64(TCGOpcode opc, TCGv_i64 a1,
112
+ TCGv_i64 a2, TCGArg a3)
113
+{
114
+ tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3);
115
+}
116
+
117
+static inline void tcg_gen_ldst_op_i32(TCGOpcode opc, TCGv_i32 val,
118
+ TCGv_ptr base, TCGArg offset)
119
+{
120
+ tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_ptr_arg(base), offset);
121
+}
122
+
123
+static inline void tcg_gen_ldst_op_i64(TCGOpcode opc, TCGv_i64 val,
124
+ TCGv_ptr base, TCGArg offset)
125
+{
126
+ tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_ptr_arg(base), offset);
127
+}
128
+
129
+static inline void tcg_gen_op4_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
130
+ TCGv_i32 a3, TCGv_i32 a4)
131
+{
132
+ tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
133
+ tcgv_i32_arg(a3), tcgv_i32_arg(a4));
134
+}
135
+
136
+static inline void tcg_gen_op4_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
137
+ TCGv_i64 a3, TCGv_i64 a4)
138
+{
139
+ tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
140
+ tcgv_i64_arg(a3), tcgv_i64_arg(a4));
141
+}
142
+
143
+static inline void tcg_gen_op4i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
144
+ TCGv_i32 a3, TCGArg a4)
145
+{
146
+ tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
147
+ tcgv_i32_arg(a3), a4);
148
+}
149
+
150
+static inline void tcg_gen_op4i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
151
+ TCGv_i64 a3, TCGArg a4)
152
+{
153
+ tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
154
+ tcgv_i64_arg(a3), a4);
155
+}
156
+
157
+static inline void tcg_gen_op4ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
158
+ TCGArg a3, TCGArg a4)
159
+{
160
+ tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3, a4);
161
+}
162
+
163
+static inline void tcg_gen_op4ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
164
+ TCGArg a3, TCGArg a4)
165
+{
166
+ tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3, a4);
167
+}
168
+
169
+static inline void tcg_gen_op5_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
170
+ TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a5)
171
+{
172
+ tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
173
+ tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5));
174
+}
175
+
176
+static inline void tcg_gen_op5_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
177
+ TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5)
178
+{
179
+ tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
180
+ tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5));
181
+}
182
+
183
+static inline void tcg_gen_op5i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
184
+ TCGv_i32 a3, TCGv_i32 a4, TCGArg a5)
185
+{
186
+ tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
187
+ tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5);
188
+}
189
+
190
+static inline void tcg_gen_op5i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
191
+ TCGv_i64 a3, TCGv_i64 a4, TCGArg a5)
192
+{
193
+ tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
194
+ tcgv_i64_arg(a3), tcgv_i64_arg(a4), a5);
195
+}
196
+
197
+static inline void tcg_gen_op5ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
198
+ TCGv_i32 a3, TCGArg a4, TCGArg a5)
199
+{
200
+ tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
201
+ tcgv_i32_arg(a3), a4, a5);
202
+}
203
+
204
+static inline void tcg_gen_op5ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
205
+ TCGv_i64 a3, TCGArg a4, TCGArg a5)
206
+{
207
+ tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
208
+ tcgv_i64_arg(a3), a4, a5);
209
+}
210
+
211
+static inline void tcg_gen_op6_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
212
+ TCGv_i32 a3, TCGv_i32 a4,
213
+ TCGv_i32 a5, TCGv_i32 a6)
214
+{
215
+ tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
216
+ tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5),
217
+ tcgv_i32_arg(a6));
218
+}
219
+
220
+static inline void tcg_gen_op6_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
221
+ TCGv_i64 a3, TCGv_i64 a4,
222
+ TCGv_i64 a5, TCGv_i64 a6)
223
+{
224
+ tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
225
+ tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5),
226
+ tcgv_i64_arg(a6));
227
+}
228
+
229
+static inline void tcg_gen_op6i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
230
+ TCGv_i32 a3, TCGv_i32 a4,
231
+ TCGv_i32 a5, TCGArg a6)
232
+{
233
+ tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
234
+ tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5), a6);
235
+}
236
+
237
+static inline void tcg_gen_op6i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
238
+ TCGv_i64 a3, TCGv_i64 a4,
239
+ TCGv_i64 a5, TCGArg a6)
240
+{
241
+ tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
242
+ tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5), a6);
243
+}
244
+
245
+static inline void tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
246
+ TCGv_i32 a3, TCGv_i32 a4,
247
+ TCGArg a5, TCGArg a6)
248
+{
249
+ tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
250
+ tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5, a6);
251
+}
252
+
253
+static inline void tcg_gen_op6ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
254
+ TCGv_i64 a3, TCGv_i64 a4,
255
+ TCGArg a5, TCGArg a6)
256
+{
257
+ tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
258
+ tcgv_i64_arg(a3), tcgv_i64_arg(a4), a5, a6);
259
+}
260
+
261
+
262
+/* Generic ops. */
263
+
264
+static inline void gen_set_label(TCGLabel *l)
265
+{
266
+ l->present = 1;
267
+ tcg_gen_op1(INDEX_op_set_label, label_arg(l));
268
+}
269
+
270
+void tcg_gen_br(TCGLabel *l);
271
+void tcg_gen_mb(TCGBar);
272
+
273
+/**
274
+ * tcg_gen_exit_tb() - output exit_tb TCG operation
275
+ * @tb: The TranslationBlock from which we are exiting
276
+ * @idx: Direct jump slot index, or exit request
277
+ *
278
+ * See tcg/README for more info about this TCG operation.
279
+ * See also tcg.h and the block comment above TB_EXIT_MASK.
280
+ *
281
+ * For a normal exit from the TB, back to the main loop, @tb should
282
+ * be NULL and @idx should be 0. Otherwise, @tb should be valid and
283
+ * @idx should be one of the TB_EXIT_ values.
284
+ */
285
+void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx);
286
+
287
+/**
288
+ * tcg_gen_goto_tb() - output goto_tb TCG operation
289
+ * @idx: Direct jump slot index (0 or 1)
290
+ *
291
+ * See tcg/README for more info about this TCG operation.
292
+ *
293
+ * NOTE: In softmmu emulation, direct jumps with goto_tb are only safe within
294
+ * the pages this TB resides in because we don't take care of direct jumps when
295
+ * address mapping changes, e.g. in tlb_flush(). In user mode, there's only a
296
+ * static address translation, so the destination address is always valid, TBs
297
+ * are always invalidated properly, and direct jumps are reset when mapping
298
+ * changes.
299
+ */
300
+void tcg_gen_goto_tb(unsigned idx);
301
+
302
+/**
303
+ * tcg_gen_lookup_and_goto_ptr() - look up the current TB, jump to it if valid
304
+ * @addr: Guest address of the target TB
305
+ *
306
+ * If the TB is not valid, jump to the epilogue.
307
+ *
308
+ * This operation is optional. If the TCG backend does not implement goto_ptr,
309
+ * this op is equivalent to calling tcg_gen_exit_tb() with 0 as the argument.
310
+ */
311
+void tcg_gen_lookup_and_goto_ptr(void);
312
+
313
+static inline void tcg_gen_plugin_cb_start(unsigned from, unsigned type,
314
+ unsigned wr)
315
+{
316
+ tcg_gen_op3(INDEX_op_plugin_cb_start, from, type, wr);
317
+}
318
+
319
+static inline void tcg_gen_plugin_cb_end(void)
320
+{
321
+ tcg_emit_op(INDEX_op_plugin_cb_end, 0);
322
+}
323
+
324
+/* 32 bit ops */
325
+
326
+void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg);
327
+void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
328
+void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2);
329
+void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
330
+void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
331
+void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
332
+void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
333
+void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
334
+void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
335
+void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
336
+void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
337
+void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
338
+void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
339
+void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
340
+void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
341
+void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
342
+void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
343
+void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
344
+void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
345
+void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
346
+void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
347
+void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
348
+void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2);
349
+void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2);
350
+void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg);
351
+void tcg_gen_ctpop_i32(TCGv_i32 a1, TCGv_i32 a2);
352
+void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
353
+void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
354
+void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
355
+void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
356
+void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
357
+ unsigned int ofs, unsigned int len);
358
+void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
359
+ unsigned int ofs, unsigned int len);
360
+void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
361
+ unsigned int ofs, unsigned int len);
362
+void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
363
+ unsigned int ofs, unsigned int len);
364
+void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
365
+ unsigned int ofs);
366
+void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *);
367
+void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *);
368
+void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
369
+ TCGv_i32 arg1, TCGv_i32 arg2);
370
+void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
371
+ TCGv_i32 arg1, int32_t arg2);
372
+void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
373
+ TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2);
374
+void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
375
+ TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
376
+void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
377
+ TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
378
+void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
379
+void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
380
+void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
381
+void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg);
382
+void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg);
383
+void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg);
384
+void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg);
385
+void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags);
386
+void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg);
387
+void tcg_gen_hswap_i32(TCGv_i32 ret, TCGv_i32 arg);
388
+void tcg_gen_smin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
389
+void tcg_gen_smax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
390
+void tcg_gen_umin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
391
+void tcg_gen_umax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
392
+void tcg_gen_abs_i32(TCGv_i32, TCGv_i32);
393
+
394
+/* Replicate a value of size @vece from @in to all the lanes in @out */
395
+void tcg_gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in);
396
+
397
+static inline void tcg_gen_discard_i32(TCGv_i32 arg)
398
+{
399
+ tcg_gen_op1_i32(INDEX_op_discard, arg);
400
+}
401
+
402
+static inline void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg)
403
+{
404
+ if (ret != arg) {
405
+ tcg_gen_op2_i32(INDEX_op_mov_i32, ret, arg);
406
+ }
407
+}
408
+
409
+static inline void tcg_gen_ld8u_i32(TCGv_i32 ret, TCGv_ptr arg2,
410
+ tcg_target_long offset)
411
+{
412
+ tcg_gen_ldst_op_i32(INDEX_op_ld8u_i32, ret, arg2, offset);
413
+}
414
+
415
+static inline void tcg_gen_ld8s_i32(TCGv_i32 ret, TCGv_ptr arg2,
416
+ tcg_target_long offset)
417
+{
418
+ tcg_gen_ldst_op_i32(INDEX_op_ld8s_i32, ret, arg2, offset);
419
+}
420
+
421
+static inline void tcg_gen_ld16u_i32(TCGv_i32 ret, TCGv_ptr arg2,
422
+ tcg_target_long offset)
423
+{
424
+ tcg_gen_ldst_op_i32(INDEX_op_ld16u_i32, ret, arg2, offset);
425
+}
426
+
427
+static inline void tcg_gen_ld16s_i32(TCGv_i32 ret, TCGv_ptr arg2,
428
+ tcg_target_long offset)
429
+{
430
+ tcg_gen_ldst_op_i32(INDEX_op_ld16s_i32, ret, arg2, offset);
431
+}
432
+
433
+static inline void tcg_gen_ld_i32(TCGv_i32 ret, TCGv_ptr arg2,
434
+ tcg_target_long offset)
435
+{
436
+ tcg_gen_ldst_op_i32(INDEX_op_ld_i32, ret, arg2, offset);
437
+}
438
+
439
+static inline void tcg_gen_st8_i32(TCGv_i32 arg1, TCGv_ptr arg2,
440
+ tcg_target_long offset)
441
+{
442
+ tcg_gen_ldst_op_i32(INDEX_op_st8_i32, arg1, arg2, offset);
443
+}
444
+
445
+static inline void tcg_gen_st16_i32(TCGv_i32 arg1, TCGv_ptr arg2,
446
+ tcg_target_long offset)
447
+{
448
+ tcg_gen_ldst_op_i32(INDEX_op_st16_i32, arg1, arg2, offset);
449
+}
450
+
451
+static inline void tcg_gen_st_i32(TCGv_i32 arg1, TCGv_ptr arg2,
452
+ tcg_target_long offset)
453
+{
454
+ tcg_gen_ldst_op_i32(INDEX_op_st_i32, arg1, arg2, offset);
455
+}
456
+
457
+static inline void tcg_gen_add_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
458
+{
459
+ tcg_gen_op3_i32(INDEX_op_add_i32, ret, arg1, arg2);
460
+}
461
+
462
+static inline void tcg_gen_sub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
463
+{
464
+ tcg_gen_op3_i32(INDEX_op_sub_i32, ret, arg1, arg2);
465
+}
466
+
467
+static inline void tcg_gen_and_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
468
+{
469
+ tcg_gen_op3_i32(INDEX_op_and_i32, ret, arg1, arg2);
470
+}
471
+
472
+static inline void tcg_gen_or_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
473
+{
474
+ tcg_gen_op3_i32(INDEX_op_or_i32, ret, arg1, arg2);
475
+}
476
+
477
+static inline void tcg_gen_xor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
478
+{
479
+ tcg_gen_op3_i32(INDEX_op_xor_i32, ret, arg1, arg2);
480
+}
481
+
482
+static inline void tcg_gen_shl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
483
+{
484
+ tcg_gen_op3_i32(INDEX_op_shl_i32, ret, arg1, arg2);
485
+}
486
+
487
+static inline void tcg_gen_shr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
488
+{
489
+ tcg_gen_op3_i32(INDEX_op_shr_i32, ret, arg1, arg2);
490
+}
491
+
492
+static inline void tcg_gen_sar_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
493
+{
494
+ tcg_gen_op3_i32(INDEX_op_sar_i32, ret, arg1, arg2);
495
+}
496
+
497
+static inline void tcg_gen_mul_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
498
+{
499
+ tcg_gen_op3_i32(INDEX_op_mul_i32, ret, arg1, arg2);
500
+}
501
+
502
+static inline void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 arg)
503
+{
504
+ if (TCG_TARGET_HAS_neg_i32) {
505
+ tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg);
506
+ } else {
507
+ tcg_gen_subfi_i32(ret, 0, arg);
508
+ }
509
+}
510
+
511
+static inline void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg)
512
+{
513
+ if (TCG_TARGET_HAS_not_i32) {
514
+ tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg);
515
+ } else {
516
+ tcg_gen_xori_i32(ret, arg, -1);
517
+ }
518
+}
519
+
520
+/* 64 bit ops */
521
+
522
+void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg);
523
+void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
524
+void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2);
525
+void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
526
+void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
527
+void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
528
+void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
529
+void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
530
+void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
531
+void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
532
+void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
533
+void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
534
+void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
535
+void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
536
+void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
537
+void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
538
+void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
539
+void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
540
+void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
541
+void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
542
+void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
543
+void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
544
+void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
545
+void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
546
+void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg);
547
+void tcg_gen_ctpop_i64(TCGv_i64 a1, TCGv_i64 a2);
548
+void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
549
+void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
550
+void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
551
+void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
552
+void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
553
+ unsigned int ofs, unsigned int len);
554
+void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
555
+ unsigned int ofs, unsigned int len);
556
+void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
557
+ unsigned int ofs, unsigned int len);
558
+void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
559
+ unsigned int ofs, unsigned int len);
560
+void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
561
+ unsigned int ofs);
562
+void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *);
563
+void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *);
564
+void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
565
+ TCGv_i64 arg1, TCGv_i64 arg2);
566
+void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
567
+ TCGv_i64 arg1, int64_t arg2);
568
+void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
569
+ TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2);
570
+void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
571
+ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
572
+void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
573
+ TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
574
+void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
575
+void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
576
+void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
577
+void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg);
578
+void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg);
579
+void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg);
580
+void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg);
581
+void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg);
582
+void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg);
583
+void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg);
584
+void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags);
585
+void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags);
586
+void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg);
587
+void tcg_gen_hswap_i64(TCGv_i64 ret, TCGv_i64 arg);
588
+void tcg_gen_wswap_i64(TCGv_i64 ret, TCGv_i64 arg);
589
+void tcg_gen_smin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
590
+void tcg_gen_smax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
591
+void tcg_gen_umin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
592
+void tcg_gen_umax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
593
+void tcg_gen_abs_i64(TCGv_i64, TCGv_i64);
594
+
595
+/* Replicate a value of size @vece from @in to all the lanes in @out */
596
+void tcg_gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in);
597
+
598
+#if TCG_TARGET_REG_BITS == 64
599
+static inline void tcg_gen_discard_i64(TCGv_i64 arg)
600
+{
601
+ tcg_gen_op1_i64(INDEX_op_discard, arg);
602
+}
603
+
604
+static inline void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
605
+{
606
+ if (ret != arg) {
607
+ tcg_gen_op2_i64(INDEX_op_mov_i64, ret, arg);
608
+ }
609
+}
610
+
611
+static inline void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2,
612
+ tcg_target_long offset)
613
+{
614
+ tcg_gen_ldst_op_i64(INDEX_op_ld8u_i64, ret, arg2, offset);
615
+}
616
+
617
+static inline void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2,
618
+ tcg_target_long offset)
619
+{
620
+ tcg_gen_ldst_op_i64(INDEX_op_ld8s_i64, ret, arg2, offset);
621
+}
622
+
623
+static inline void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2,
624
+ tcg_target_long offset)
625
+{
626
+ tcg_gen_ldst_op_i64(INDEX_op_ld16u_i64, ret, arg2, offset);
627
+}
628
+
629
+static inline void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2,
630
+ tcg_target_long offset)
631
+{
632
+ tcg_gen_ldst_op_i64(INDEX_op_ld16s_i64, ret, arg2, offset);
633
+}
634
+
635
+static inline void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2,
636
+ tcg_target_long offset)
637
+{
638
+ tcg_gen_ldst_op_i64(INDEX_op_ld32u_i64, ret, arg2, offset);
639
+}
640
+
641
+static inline void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2,
642
+ tcg_target_long offset)
643
+{
644
+ tcg_gen_ldst_op_i64(INDEX_op_ld32s_i64, ret, arg2, offset);
645
+}
646
+
647
+static inline void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2,
648
+ tcg_target_long offset)
649
+{
650
+ tcg_gen_ldst_op_i64(INDEX_op_ld_i64, ret, arg2, offset);
651
+}
652
+
653
+static inline void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2,
654
+ tcg_target_long offset)
655
+{
656
+ tcg_gen_ldst_op_i64(INDEX_op_st8_i64, arg1, arg2, offset);
657
+}
658
+
659
+static inline void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2,
660
+ tcg_target_long offset)
661
+{
662
+ tcg_gen_ldst_op_i64(INDEX_op_st16_i64, arg1, arg2, offset);
663
+}
664
+
665
+static inline void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2,
666
+ tcg_target_long offset)
667
+{
668
+ tcg_gen_ldst_op_i64(INDEX_op_st32_i64, arg1, arg2, offset);
669
+}
670
+
671
+static inline void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2,
672
+ tcg_target_long offset)
673
+{
674
+ tcg_gen_ldst_op_i64(INDEX_op_st_i64, arg1, arg2, offset);
675
+}
676
+
677
+static inline void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
678
+{
679
+ tcg_gen_op3_i64(INDEX_op_add_i64, ret, arg1, arg2);
680
+}
681
+
682
+static inline void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
683
+{
684
+ tcg_gen_op3_i64(INDEX_op_sub_i64, ret, arg1, arg2);
685
+}
686
+
687
+static inline void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
688
+{
689
+ tcg_gen_op3_i64(INDEX_op_and_i64, ret, arg1, arg2);
690
+}
691
+
692
+static inline void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
693
+{
694
+ tcg_gen_op3_i64(INDEX_op_or_i64, ret, arg1, arg2);
695
+}
696
+
697
+static inline void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
698
+{
699
+ tcg_gen_op3_i64(INDEX_op_xor_i64, ret, arg1, arg2);
700
+}
701
+
702
+static inline void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
703
+{
704
+ tcg_gen_op3_i64(INDEX_op_shl_i64, ret, arg1, arg2);
705
+}
706
+
707
+static inline void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
708
+{
709
+ tcg_gen_op3_i64(INDEX_op_shr_i64, ret, arg1, arg2);
710
+}
711
+
712
+static inline void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
713
+{
714
+ tcg_gen_op3_i64(INDEX_op_sar_i64, ret, arg1, arg2);
715
+}
716
+
717
+static inline void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
718
+{
719
+ tcg_gen_op3_i64(INDEX_op_mul_i64, ret, arg1, arg2);
720
+}
721
+#else /* TCG_TARGET_REG_BITS == 32 */
722
+void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
723
+void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
724
+void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
725
+
726
+void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
727
+void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
728
+
729
+void tcg_gen_discard_i64(TCGv_i64 arg);
730
+void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg);
731
+void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
732
+void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
733
+void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
734
+void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
735
+void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
736
+void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
737
+void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
738
+void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
739
+void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
740
+void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
741
+void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
742
+void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
743
+void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
744
+void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
745
+void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
746
+#endif /* TCG_TARGET_REG_BITS */
747
+
748
+static inline void tcg_gen_neg_i64(TCGv_i64 ret, TCGv_i64 arg)
749
+{
750
+ if (TCG_TARGET_HAS_neg_i64) {
751
+ tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg);
752
+ } else {
753
+ tcg_gen_subfi_i64(ret, 0, arg);
754
+ }
755
+}
756
+
757
+/* Size changing operations. */
758
+
759
+void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
760
+void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
761
+void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high);
762
+void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg);
763
+void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg);
764
+void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg);
765
+void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg);
766
+
767
+void tcg_gen_mov_i128(TCGv_i128 dst, TCGv_i128 src);
768
+void tcg_gen_extr_i128_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i128 arg);
769
+void tcg_gen_concat_i64_i128(TCGv_i128 ret, TCGv_i64 lo, TCGv_i64 hi);
770
+
771
+static inline void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi)
772
+{
773
+ tcg_gen_deposit_i64(ret, lo, hi, 32, 32);
774
+}
775
+
776
+/* Local load/store bit ops */
777
+
778
+void tcg_gen_qemu_ld_i32_chk(TCGv_i32, TCGTemp *, TCGArg, MemOp, TCGType);
779
+void tcg_gen_qemu_st_i32_chk(TCGv_i32, TCGTemp *, TCGArg, MemOp, TCGType);
780
+void tcg_gen_qemu_ld_i64_chk(TCGv_i64, TCGTemp *, TCGArg, MemOp, TCGType);
781
+void tcg_gen_qemu_st_i64_chk(TCGv_i64, TCGTemp *, TCGArg, MemOp, TCGType);
782
+void tcg_gen_qemu_ld_i128_chk(TCGv_i128, TCGTemp *, TCGArg, MemOp, TCGType);
783
+void tcg_gen_qemu_st_i128_chk(TCGv_i128, TCGTemp *, TCGArg, MemOp, TCGType);
784
+
785
+/* Atomic ops */
786
+
787
+void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, TCGv_i32,
788
+ TCGArg, MemOp, TCGType);
789
+void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, TCGv_i64,
790
+ TCGArg, MemOp, TCGType);
791
+void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128,
792
+ TCGv_i128, TCGArg, MemOp, TCGType);
793
+
794
+void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, TCGv_i32,
795
+ TCGArg, MemOp, TCGType);
796
+void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, TCGv_i64,
797
+ TCGArg, MemOp, TCGType);
798
+void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128,
799
+ TCGv_i128, TCGArg, MemOp, TCGType);
800
+
801
+void tcg_gen_atomic_xchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
802
+ TCGArg, MemOp, TCGType);
803
+void tcg_gen_atomic_xchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
804
+ TCGArg, MemOp, TCGType);
805
+
806
+void tcg_gen_atomic_fetch_add_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
807
+ TCGArg, MemOp, TCGType);
808
+void tcg_gen_atomic_fetch_add_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
809
+ TCGArg, MemOp, TCGType);
810
+void tcg_gen_atomic_fetch_and_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
811
+ TCGArg, MemOp, TCGType);
812
+void tcg_gen_atomic_fetch_and_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
813
+ TCGArg, MemOp, TCGType);
814
+void tcg_gen_atomic_fetch_or_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
815
+ TCGArg, MemOp, TCGType);
816
+void tcg_gen_atomic_fetch_or_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
817
+ TCGArg, MemOp, TCGType);
818
+void tcg_gen_atomic_fetch_xor_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
819
+ TCGArg, MemOp, TCGType);
820
+void tcg_gen_atomic_fetch_xor_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
821
+ TCGArg, MemOp, TCGType);
822
+void tcg_gen_atomic_fetch_smin_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
823
+ TCGArg, MemOp, TCGType);
824
+void tcg_gen_atomic_fetch_smin_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
825
+ TCGArg, MemOp, TCGType);
826
+void tcg_gen_atomic_fetch_umin_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
827
+ TCGArg, MemOp, TCGType);
828
+void tcg_gen_atomic_fetch_umin_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
829
+ TCGArg, MemOp, TCGType);
830
+void tcg_gen_atomic_fetch_smax_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
831
+ TCGArg, MemOp, TCGType);
832
+void tcg_gen_atomic_fetch_smax_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
833
+ TCGArg, MemOp, TCGType);
834
+void tcg_gen_atomic_fetch_umax_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
835
+ TCGArg, MemOp, TCGType);
836
+void tcg_gen_atomic_fetch_umax_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
837
+ TCGArg, MemOp, TCGType);
838
+
839
+void tcg_gen_atomic_add_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
840
+ TCGArg, MemOp, TCGType);
841
+void tcg_gen_atomic_add_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
842
+ TCGArg, MemOp, TCGType);
843
+void tcg_gen_atomic_and_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
844
+ TCGArg, MemOp, TCGType);
845
+void tcg_gen_atomic_and_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
846
+ TCGArg, MemOp, TCGType);
847
+void tcg_gen_atomic_or_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
848
+ TCGArg, MemOp, TCGType);
849
+void tcg_gen_atomic_or_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
850
+ TCGArg, MemOp, TCGType);
851
+void tcg_gen_atomic_xor_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
852
+ TCGArg, MemOp, TCGType);
853
+void tcg_gen_atomic_xor_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
854
+ TCGArg, MemOp, TCGType);
855
+void tcg_gen_atomic_smin_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
856
+ TCGArg, MemOp, TCGType);
857
+void tcg_gen_atomic_smin_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
858
+ TCGArg, MemOp, TCGType);
859
+void tcg_gen_atomic_umin_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
860
+ TCGArg, MemOp, TCGType);
861
+void tcg_gen_atomic_umin_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
862
+ TCGArg, MemOp, TCGType);
863
+void tcg_gen_atomic_smax_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
864
+ TCGArg, MemOp, TCGType);
865
+void tcg_gen_atomic_smax_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
866
+ TCGArg, MemOp, TCGType);
867
+void tcg_gen_atomic_umax_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
868
+ TCGArg, MemOp, TCGType);
869
+void tcg_gen_atomic_umax_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
870
+ TCGArg, MemOp, TCGType);
871
+
872
+/* Vector ops */
873
+
874
+void tcg_gen_mov_vec(TCGv_vec, TCGv_vec);
875
+void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32);
876
+void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec, TCGv_i64);
877
+void tcg_gen_dup_mem_vec(unsigned vece, TCGv_vec, TCGv_ptr, tcg_target_long);
878
+void tcg_gen_dupi_vec(unsigned vece, TCGv_vec, uint64_t);
879
+void tcg_gen_add_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
880
+void tcg_gen_sub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
881
+void tcg_gen_mul_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
882
+void tcg_gen_and_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
883
+void tcg_gen_or_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
884
+void tcg_gen_xor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
885
+void tcg_gen_andc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
886
+void tcg_gen_orc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
887
+void tcg_gen_nand_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
888
+void tcg_gen_nor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
889
+void tcg_gen_eqv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
890
+void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
891
+void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
892
+void tcg_gen_abs_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
893
+void tcg_gen_ssadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
894
+void tcg_gen_usadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
895
+void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
896
+void tcg_gen_ussub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
897
+void tcg_gen_smin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
898
+void tcg_gen_umin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
899
+void tcg_gen_smax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
900
+void tcg_gen_umax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
901
+
902
+void tcg_gen_shli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
903
+void tcg_gen_shri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
904
+void tcg_gen_sari_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
905
+void tcg_gen_rotli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
906
+void tcg_gen_rotri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
907
+
908
+void tcg_gen_shls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
909
+void tcg_gen_shrs_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
910
+void tcg_gen_sars_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
911
+void tcg_gen_rotls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
912
+
913
+void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
914
+void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
915
+void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
916
+void tcg_gen_rotlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
917
+void tcg_gen_rotrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
918
+
919
+void tcg_gen_cmp_vec(TCGCond cond, unsigned vece, TCGv_vec r,
920
+ TCGv_vec a, TCGv_vec b);
921
+
922
+void tcg_gen_bitsel_vec(unsigned vece, TCGv_vec r, TCGv_vec a,
923
+ TCGv_vec b, TCGv_vec c);
924
+void tcg_gen_cmpsel_vec(TCGCond cond, unsigned vece, TCGv_vec r,
925
+ TCGv_vec a, TCGv_vec b, TCGv_vec c, TCGv_vec d);
926
+
927
+void tcg_gen_ld_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset);
928
+void tcg_gen_st_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset);
929
+void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
930
+
931
+/* Host pointer ops */
932
+
933
+#if UINTPTR_MAX == UINT32_MAX
934
+# define PTR i32
935
+# define NAT TCGv_i32
936
+#else
937
+# define PTR i64
938
+# define NAT TCGv_i64
939
+#endif
940
+
941
+static inline void tcg_gen_ld_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t o)
942
+{
943
+ glue(tcg_gen_ld_,PTR)((NAT)r, a, o);
944
+}
945
+
946
+static inline void tcg_gen_st_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t o)
947
+{
948
+ glue(tcg_gen_st_, PTR)((NAT)r, a, o);
949
+}
950
+
951
+static inline void tcg_gen_discard_ptr(TCGv_ptr a)
952
+{
953
+ glue(tcg_gen_discard_,PTR)((NAT)a);
954
+}
955
+
956
+static inline void tcg_gen_add_ptr(TCGv_ptr r, TCGv_ptr a, TCGv_ptr b)
957
+{
958
+ glue(tcg_gen_add_,PTR)((NAT)r, (NAT)a, (NAT)b);
959
+}
960
+
961
+static inline void tcg_gen_addi_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t b)
962
+{
963
+ glue(tcg_gen_addi_,PTR)((NAT)r, (NAT)a, b);
964
+}
965
+
966
+static inline void tcg_gen_mov_ptr(TCGv_ptr d, TCGv_ptr s)
967
+{
968
+ glue(tcg_gen_mov_,PTR)((NAT)d, (NAT)s);
969
+}
970
+
971
+static inline void tcg_gen_movi_ptr(TCGv_ptr d, intptr_t s)
972
+{
973
+ glue(tcg_gen_movi_,PTR)((NAT)d, s);
974
+}
975
+
976
+static inline void tcg_gen_brcondi_ptr(TCGCond cond, TCGv_ptr a,
977
+ intptr_t b, TCGLabel *label)
978
+{
979
+ glue(tcg_gen_brcondi_,PTR)(cond, (NAT)a, b, label);
980
+}
981
+
982
+static inline void tcg_gen_ext_i32_ptr(TCGv_ptr r, TCGv_i32 a)
983
+{
984
+#if UINTPTR_MAX == UINT32_MAX
985
+ tcg_gen_mov_i32((NAT)r, a);
986
+#else
987
+ tcg_gen_ext_i32_i64((NAT)r, a);
988
+#endif
989
+}
990
+
991
+static inline void tcg_gen_trunc_i64_ptr(TCGv_ptr r, TCGv_i64 a)
992
+{
993
+#if UINTPTR_MAX == UINT32_MAX
994
+ tcg_gen_extrl_i64_i32((NAT)r, a);
995
+#else
996
+ tcg_gen_mov_i64((NAT)r, a);
997
+#endif
998
+}
999
+
1000
+static inline void tcg_gen_extu_ptr_i64(TCGv_i64 r, TCGv_ptr a)
1001
+{
1002
+#if UINTPTR_MAX == UINT32_MAX
1003
+ tcg_gen_extu_i32_i64(r, (NAT)a);
1004
+#else
1005
+ tcg_gen_mov_i64(r, (NAT)a);
1006
+#endif
1007
+}
1008
+
1009
+static inline void tcg_gen_trunc_ptr_i32(TCGv_i32 r, TCGv_ptr a)
1010
+{
1011
+#if UINTPTR_MAX == UINT32_MAX
1012
+ tcg_gen_mov_i32(r, (NAT)a);
1013
+#else
1014
+ tcg_gen_extrl_i64_i32(r, (NAT)a);
1015
+#endif
1016
+}
1017
+
1018
+#undef PTR
1019
+#undef NAT
1020
+
1021
+#endif /* TCG_TCG_OP_COMMON_H */
1022
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
1023
index XXXXXXX..XXXXXXX 100644
1024
--- a/include/tcg/tcg-op.h
1025
+++ b/include/tcg/tcg-op.h
1026
@@ -XXX,XX +XXX,XX @@
1027
+/* SPDX-License-Identifier: MIT */
1028
/*
1029
- * Tiny Code Generator for QEMU
1030
+ * Target dependent opcode generation functions.
1031
*
1032
* Copyright (c) 2008 Fabrice Bellard
1033
- *
1034
- * Permission is hereby granted, free of charge, to any person obtaining a copy
1035
- * of this software and associated documentation files (the "Software"), to deal
1036
- * in the Software without restriction, including without limitation the rights
1037
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
1038
- * copies of the Software, and to permit persons to whom the Software is
1039
- * furnished to do so, subject to the following conditions:
1040
- *
1041
- * The above copyright notice and this permission notice shall be included in
1042
- * all copies or substantial portions of the Software.
1043
- *
1044
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1045
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1046
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
1047
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1048
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
1049
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
1050
- * THE SOFTWARE.
1051
*/
1052
1053
#ifndef TCG_TCG_OP_H
1054
#define TCG_TCG_OP_H
1055
1056
-#include "tcg/tcg.h"
1057
-#include "exec/helper-proto.h"
1058
-#include "exec/helper-gen.h"
1059
-
1060
-/* Basic output routines. Not for general consumption. */
1061
-
1062
-void tcg_gen_op1(TCGOpcode, TCGArg);
1063
-void tcg_gen_op2(TCGOpcode, TCGArg, TCGArg);
1064
-void tcg_gen_op3(TCGOpcode, TCGArg, TCGArg, TCGArg);
1065
-void tcg_gen_op4(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg);
1066
-void tcg_gen_op5(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg);
1067
-void tcg_gen_op6(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg);
1068
-
1069
-void vec_gen_2(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg);
1070
-void vec_gen_3(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg);
1071
-void vec_gen_4(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg, TCGArg);
1072
-
1073
-static inline void tcg_gen_op1_i32(TCGOpcode opc, TCGv_i32 a1)
1074
-{
1075
- tcg_gen_op1(opc, tcgv_i32_arg(a1));
1076
-}
1077
-
1078
-static inline void tcg_gen_op1_i64(TCGOpcode opc, TCGv_i64 a1)
1079
-{
1080
- tcg_gen_op1(opc, tcgv_i64_arg(a1));
1081
-}
1082
-
1083
-static inline void tcg_gen_op1i(TCGOpcode opc, TCGArg a1)
1084
-{
1085
- tcg_gen_op1(opc, a1);
1086
-}
1087
-
1088
-static inline void tcg_gen_op2_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2)
1089
-{
1090
- tcg_gen_op2(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2));
1091
-}
1092
-
1093
-static inline void tcg_gen_op2_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2)
1094
-{
1095
- tcg_gen_op2(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2));
1096
-}
1097
-
1098
-static inline void tcg_gen_op2i_i32(TCGOpcode opc, TCGv_i32 a1, TCGArg a2)
1099
-{
1100
- tcg_gen_op2(opc, tcgv_i32_arg(a1), a2);
1101
-}
1102
-
1103
-static inline void tcg_gen_op2i_i64(TCGOpcode opc, TCGv_i64 a1, TCGArg a2)
1104
-{
1105
- tcg_gen_op2(opc, tcgv_i64_arg(a1), a2);
1106
-}
1107
-
1108
-static inline void tcg_gen_op2ii(TCGOpcode opc, TCGArg a1, TCGArg a2)
1109
-{
1110
- tcg_gen_op2(opc, a1, a2);
1111
-}
1112
-
1113
-static inline void tcg_gen_op3_i32(TCGOpcode opc, TCGv_i32 a1,
1114
- TCGv_i32 a2, TCGv_i32 a3)
1115
-{
1116
- tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), tcgv_i32_arg(a3));
1117
-}
1118
-
1119
-static inline void tcg_gen_op3_i64(TCGOpcode opc, TCGv_i64 a1,
1120
- TCGv_i64 a2, TCGv_i64 a3)
1121
-{
1122
- tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), tcgv_i64_arg(a3));
1123
-}
1124
-
1125
-static inline void tcg_gen_op3i_i32(TCGOpcode opc, TCGv_i32 a1,
1126
- TCGv_i32 a2, TCGArg a3)
1127
-{
1128
- tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3);
1129
-}
1130
-
1131
-static inline void tcg_gen_op3i_i64(TCGOpcode opc, TCGv_i64 a1,
1132
- TCGv_i64 a2, TCGArg a3)
1133
-{
1134
- tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3);
1135
-}
1136
-
1137
-static inline void tcg_gen_ldst_op_i32(TCGOpcode opc, TCGv_i32 val,
1138
- TCGv_ptr base, TCGArg offset)
1139
-{
1140
- tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_ptr_arg(base), offset);
1141
-}
1142
-
1143
-static inline void tcg_gen_ldst_op_i64(TCGOpcode opc, TCGv_i64 val,
1144
- TCGv_ptr base, TCGArg offset)
1145
-{
1146
- tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_ptr_arg(base), offset);
1147
-}
1148
-
1149
-static inline void tcg_gen_op4_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
1150
- TCGv_i32 a3, TCGv_i32 a4)
1151
-{
1152
- tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
1153
- tcgv_i32_arg(a3), tcgv_i32_arg(a4));
1154
-}
1155
-
1156
-static inline void tcg_gen_op4_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
1157
- TCGv_i64 a3, TCGv_i64 a4)
1158
-{
1159
- tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
1160
- tcgv_i64_arg(a3), tcgv_i64_arg(a4));
1161
-}
1162
-
1163
-static inline void tcg_gen_op4i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
1164
- TCGv_i32 a3, TCGArg a4)
1165
-{
1166
- tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
1167
- tcgv_i32_arg(a3), a4);
1168
-}
1169
-
1170
-static inline void tcg_gen_op4i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
1171
- TCGv_i64 a3, TCGArg a4)
1172
-{
1173
- tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
1174
- tcgv_i64_arg(a3), a4);
1175
-}
1176
-
1177
-static inline void tcg_gen_op4ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
1178
- TCGArg a3, TCGArg a4)
1179
-{
1180
- tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3, a4);
1181
-}
1182
-
1183
-static inline void tcg_gen_op4ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
1184
- TCGArg a3, TCGArg a4)
1185
-{
1186
- tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3, a4);
1187
-}
1188
-
1189
-static inline void tcg_gen_op5_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
1190
- TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a5)
1191
-{
1192
- tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
1193
- tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5));
1194
-}
1195
-
1196
-static inline void tcg_gen_op5_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
1197
- TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5)
1198
-{
1199
- tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
1200
- tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5));
1201
-}
1202
-
1203
-static inline void tcg_gen_op5i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
1204
- TCGv_i32 a3, TCGv_i32 a4, TCGArg a5)
1205
-{
1206
- tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
1207
- tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5);
1208
-}
1209
-
1210
-static inline void tcg_gen_op5i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
1211
- TCGv_i64 a3, TCGv_i64 a4, TCGArg a5)
1212
-{
1213
- tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
1214
- tcgv_i64_arg(a3), tcgv_i64_arg(a4), a5);
1215
-}
1216
-
1217
-static inline void tcg_gen_op5ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
1218
- TCGv_i32 a3, TCGArg a4, TCGArg a5)
1219
-{
1220
- tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
1221
- tcgv_i32_arg(a3), a4, a5);
1222
-}
1223
-
1224
-static inline void tcg_gen_op5ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
1225
- TCGv_i64 a3, TCGArg a4, TCGArg a5)
1226
-{
1227
- tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
1228
- tcgv_i64_arg(a3), a4, a5);
1229
-}
1230
-
1231
-static inline void tcg_gen_op6_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
1232
- TCGv_i32 a3, TCGv_i32 a4,
1233
- TCGv_i32 a5, TCGv_i32 a6)
1234
-{
1235
- tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
1236
- tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5),
1237
- tcgv_i32_arg(a6));
1238
-}
1239
-
1240
-static inline void tcg_gen_op6_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
1241
- TCGv_i64 a3, TCGv_i64 a4,
1242
- TCGv_i64 a5, TCGv_i64 a6)
1243
-{
1244
- tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
1245
- tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5),
1246
- tcgv_i64_arg(a6));
1247
-}
1248
-
1249
-static inline void tcg_gen_op6i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
1250
- TCGv_i32 a3, TCGv_i32 a4,
1251
- TCGv_i32 a5, TCGArg a6)
1252
-{
1253
- tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
1254
- tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5), a6);
1255
-}
1256
-
1257
-static inline void tcg_gen_op6i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
1258
- TCGv_i64 a3, TCGv_i64 a4,
1259
- TCGv_i64 a5, TCGArg a6)
1260
-{
1261
- tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
1262
- tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5), a6);
1263
-}
1264
-
1265
-static inline void tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
1266
- TCGv_i32 a3, TCGv_i32 a4,
1267
- TCGArg a5, TCGArg a6)
1268
-{
1269
- tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
1270
- tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5, a6);
1271
-}
1272
-
1273
-static inline void tcg_gen_op6ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
1274
- TCGv_i64 a3, TCGv_i64 a4,
1275
- TCGArg a5, TCGArg a6)
1276
-{
1277
- tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
1278
- tcgv_i64_arg(a3), tcgv_i64_arg(a4), a5, a6);
1279
-}
1280
-
1281
-
1282
-/* Generic ops. */
1283
-
1284
-static inline void gen_set_label(TCGLabel *l)
1285
-{
1286
- l->present = 1;
1287
- tcg_gen_op1(INDEX_op_set_label, label_arg(l));
1288
-}
1289
-
1290
-void tcg_gen_br(TCGLabel *l);
1291
-void tcg_gen_mb(TCGBar);
1292
-
1293
-/* Helper calls. */
1294
-
1295
-/* 32 bit ops */
1296
-
1297
-void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg);
1298
-void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
1299
-void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2);
1300
-void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
1301
-void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
1302
-void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
1303
-void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
1304
-void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
1305
-void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
1306
-void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
1307
-void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
1308
-void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
1309
-void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
1310
-void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
1311
-void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
1312
-void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
1313
-void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
1314
-void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
1315
-void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
1316
-void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
1317
-void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
1318
-void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
1319
-void tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2);
1320
-void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2);
1321
-void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg);
1322
-void tcg_gen_ctpop_i32(TCGv_i32 a1, TCGv_i32 a2);
1323
-void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
1324
-void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
1325
-void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
1326
-void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
1327
-void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
1328
- unsigned int ofs, unsigned int len);
1329
-void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
1330
- unsigned int ofs, unsigned int len);
1331
-void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
1332
- unsigned int ofs, unsigned int len);
1333
-void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
1334
- unsigned int ofs, unsigned int len);
1335
-void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
1336
- unsigned int ofs);
1337
-void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *);
1338
-void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *);
1339
-void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret,
1340
- TCGv_i32 arg1, TCGv_i32 arg2);
1341
-void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret,
1342
- TCGv_i32 arg1, int32_t arg2);
1343
-void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1,
1344
- TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2);
1345
-void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
1346
- TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
1347
-void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al,
1348
- TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh);
1349
-void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
1350
-void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
1351
-void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2);
1352
-void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg);
1353
-void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg);
1354
-void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg);
1355
-void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg);
1356
-void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags);
1357
-void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg);
1358
-void tcg_gen_hswap_i32(TCGv_i32 ret, TCGv_i32 arg);
1359
-void tcg_gen_smin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
1360
-void tcg_gen_smax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
1361
-void tcg_gen_umin_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
1362
-void tcg_gen_umax_i32(TCGv_i32, TCGv_i32 arg1, TCGv_i32 arg2);
1363
-void tcg_gen_abs_i32(TCGv_i32, TCGv_i32);
1364
-
1365
-/* Replicate a value of size @vece from @in to all the lanes in @out */
1366
-void tcg_gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in);
1367
-
1368
-static inline void tcg_gen_discard_i32(TCGv_i32 arg)
1369
-{
1370
- tcg_gen_op1_i32(INDEX_op_discard, arg);
1371
-}
1372
-
1373
-static inline void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg)
1374
-{
1375
- if (ret != arg) {
1376
- tcg_gen_op2_i32(INDEX_op_mov_i32, ret, arg);
1377
- }
1378
-}
1379
-
1380
-static inline void tcg_gen_ld8u_i32(TCGv_i32 ret, TCGv_ptr arg2,
1381
- tcg_target_long offset)
1382
-{
1383
- tcg_gen_ldst_op_i32(INDEX_op_ld8u_i32, ret, arg2, offset);
1384
-}
1385
-
1386
-static inline void tcg_gen_ld8s_i32(TCGv_i32 ret, TCGv_ptr arg2,
1387
- tcg_target_long offset)
1388
-{
1389
- tcg_gen_ldst_op_i32(INDEX_op_ld8s_i32, ret, arg2, offset);
1390
-}
1391
-
1392
-static inline void tcg_gen_ld16u_i32(TCGv_i32 ret, TCGv_ptr arg2,
1393
- tcg_target_long offset)
1394
-{
1395
- tcg_gen_ldst_op_i32(INDEX_op_ld16u_i32, ret, arg2, offset);
1396
-}
1397
-
1398
-static inline void tcg_gen_ld16s_i32(TCGv_i32 ret, TCGv_ptr arg2,
1399
- tcg_target_long offset)
1400
-{
1401
- tcg_gen_ldst_op_i32(INDEX_op_ld16s_i32, ret, arg2, offset);
1402
-}
1403
-
1404
-static inline void tcg_gen_ld_i32(TCGv_i32 ret, TCGv_ptr arg2,
1405
- tcg_target_long offset)
1406
-{
1407
- tcg_gen_ldst_op_i32(INDEX_op_ld_i32, ret, arg2, offset);
1408
-}
1409
-
1410
-static inline void tcg_gen_st8_i32(TCGv_i32 arg1, TCGv_ptr arg2,
1411
- tcg_target_long offset)
1412
-{
1413
- tcg_gen_ldst_op_i32(INDEX_op_st8_i32, arg1, arg2, offset);
1414
-}
1415
-
1416
-static inline void tcg_gen_st16_i32(TCGv_i32 arg1, TCGv_ptr arg2,
1417
- tcg_target_long offset)
1418
-{
1419
- tcg_gen_ldst_op_i32(INDEX_op_st16_i32, arg1, arg2, offset);
1420
-}
1421
-
1422
-static inline void tcg_gen_st_i32(TCGv_i32 arg1, TCGv_ptr arg2,
1423
- tcg_target_long offset)
1424
-{
1425
- tcg_gen_ldst_op_i32(INDEX_op_st_i32, arg1, arg2, offset);
1426
-}
1427
-
1428
-static inline void tcg_gen_add_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
1429
-{
1430
- tcg_gen_op3_i32(INDEX_op_add_i32, ret, arg1, arg2);
1431
-}
1432
-
1433
-static inline void tcg_gen_sub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
1434
-{
1435
- tcg_gen_op3_i32(INDEX_op_sub_i32, ret, arg1, arg2);
1436
-}
1437
-
1438
-static inline void tcg_gen_and_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
1439
-{
1440
- tcg_gen_op3_i32(INDEX_op_and_i32, ret, arg1, arg2);
1441
-}
1442
-
1443
-static inline void tcg_gen_or_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
1444
-{
1445
- tcg_gen_op3_i32(INDEX_op_or_i32, ret, arg1, arg2);
1446
-}
1447
-
1448
-static inline void tcg_gen_xor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
1449
-{
1450
- tcg_gen_op3_i32(INDEX_op_xor_i32, ret, arg1, arg2);
1451
-}
1452
-
1453
-static inline void tcg_gen_shl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
1454
-{
1455
- tcg_gen_op3_i32(INDEX_op_shl_i32, ret, arg1, arg2);
1456
-}
1457
-
1458
-static inline void tcg_gen_shr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
1459
-{
1460
- tcg_gen_op3_i32(INDEX_op_shr_i32, ret, arg1, arg2);
1461
-}
1462
-
1463
-static inline void tcg_gen_sar_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
1464
-{
1465
- tcg_gen_op3_i32(INDEX_op_sar_i32, ret, arg1, arg2);
1466
-}
1467
-
1468
-static inline void tcg_gen_mul_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
1469
-{
1470
- tcg_gen_op3_i32(INDEX_op_mul_i32, ret, arg1, arg2);
1471
-}
1472
-
1473
-static inline void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 arg)
1474
-{
1475
- if (TCG_TARGET_HAS_neg_i32) {
1476
- tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg);
1477
- } else {
1478
- tcg_gen_subfi_i32(ret, 0, arg);
1479
- }
1480
-}
1481
-
1482
-static inline void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg)
1483
-{
1484
- if (TCG_TARGET_HAS_not_i32) {
1485
- tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg);
1486
- } else {
1487
- tcg_gen_xori_i32(ret, arg, -1);
1488
- }
1489
-}
1490
-
1491
-/* 64 bit ops */
1492
-
1493
-void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg);
1494
-void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
1495
-void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2);
1496
-void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
1497
-void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
1498
-void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
1499
-void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
1500
-void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
1501
-void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
1502
-void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
1503
-void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
1504
-void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1505
-void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1506
-void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1507
-void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1508
-void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1509
-void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1510
-void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1511
-void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1512
-void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1513
-void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1514
-void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1515
-void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
1516
-void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
1517
-void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg);
1518
-void tcg_gen_ctpop_i64(TCGv_i64 a1, TCGv_i64 a2);
1519
-void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1520
-void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
1521
-void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1522
-void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
1523
-void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
1524
- unsigned int ofs, unsigned int len);
1525
-void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
1526
- unsigned int ofs, unsigned int len);
1527
-void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
1528
- unsigned int ofs, unsigned int len);
1529
-void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
1530
- unsigned int ofs, unsigned int len);
1531
-void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
1532
- unsigned int ofs);
1533
-void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *);
1534
-void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *);
1535
-void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret,
1536
- TCGv_i64 arg1, TCGv_i64 arg2);
1537
-void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret,
1538
- TCGv_i64 arg1, int64_t arg2);
1539
-void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1,
1540
- TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2);
1541
-void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
1542
- TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
1543
-void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al,
1544
- TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
1545
-void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
1546
-void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
1547
-void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2);
1548
-void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg);
1549
-void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg);
1550
-void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg);
1551
-void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg);
1552
-void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg);
1553
-void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg);
1554
-void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg);
1555
-void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags);
1556
-void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags);
1557
-void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg);
1558
-void tcg_gen_hswap_i64(TCGv_i64 ret, TCGv_i64 arg);
1559
-void tcg_gen_wswap_i64(TCGv_i64 ret, TCGv_i64 arg);
1560
-void tcg_gen_smin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
1561
-void tcg_gen_smax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
1562
-void tcg_gen_umin_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
1563
-void tcg_gen_umax_i64(TCGv_i64, TCGv_i64 arg1, TCGv_i64 arg2);
1564
-void tcg_gen_abs_i64(TCGv_i64, TCGv_i64);
1565
-
1566
-/* Replicate a value of size @vece from @in to all the lanes in @out */
1567
-void tcg_gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in);
1568
-
1569
-#if TCG_TARGET_REG_BITS == 64
1570
-static inline void tcg_gen_discard_i64(TCGv_i64 arg)
1571
-{
1572
- tcg_gen_op1_i64(INDEX_op_discard, arg);
1573
-}
1574
-
1575
-static inline void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg)
1576
-{
1577
- if (ret != arg) {
1578
- tcg_gen_op2_i64(INDEX_op_mov_i64, ret, arg);
1579
- }
1580
-}
1581
-
1582
-static inline void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2,
1583
- tcg_target_long offset)
1584
-{
1585
- tcg_gen_ldst_op_i64(INDEX_op_ld8u_i64, ret, arg2, offset);
1586
-}
1587
-
1588
-static inline void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2,
1589
- tcg_target_long offset)
1590
-{
1591
- tcg_gen_ldst_op_i64(INDEX_op_ld8s_i64, ret, arg2, offset);
1592
-}
1593
-
1594
-static inline void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2,
1595
- tcg_target_long offset)
1596
-{
1597
- tcg_gen_ldst_op_i64(INDEX_op_ld16u_i64, ret, arg2, offset);
1598
-}
1599
-
1600
-static inline void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2,
1601
- tcg_target_long offset)
1602
-{
1603
- tcg_gen_ldst_op_i64(INDEX_op_ld16s_i64, ret, arg2, offset);
1604
-}
1605
-
1606
-static inline void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2,
1607
- tcg_target_long offset)
1608
-{
1609
- tcg_gen_ldst_op_i64(INDEX_op_ld32u_i64, ret, arg2, offset);
1610
-}
1611
-
1612
-static inline void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2,
1613
- tcg_target_long offset)
1614
-{
1615
- tcg_gen_ldst_op_i64(INDEX_op_ld32s_i64, ret, arg2, offset);
1616
-}
1617
-
1618
-static inline void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2,
1619
- tcg_target_long offset)
1620
-{
1621
- tcg_gen_ldst_op_i64(INDEX_op_ld_i64, ret, arg2, offset);
1622
-}
1623
-
1624
-static inline void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2,
1625
- tcg_target_long offset)
1626
-{
1627
- tcg_gen_ldst_op_i64(INDEX_op_st8_i64, arg1, arg2, offset);
1628
-}
1629
-
1630
-static inline void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2,
1631
- tcg_target_long offset)
1632
-{
1633
- tcg_gen_ldst_op_i64(INDEX_op_st16_i64, arg1, arg2, offset);
1634
-}
1635
-
1636
-static inline void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2,
1637
- tcg_target_long offset)
1638
-{
1639
- tcg_gen_ldst_op_i64(INDEX_op_st32_i64, arg1, arg2, offset);
1640
-}
1641
-
1642
-static inline void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2,
1643
- tcg_target_long offset)
1644
-{
1645
- tcg_gen_ldst_op_i64(INDEX_op_st_i64, arg1, arg2, offset);
1646
-}
1647
-
1648
-static inline void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1649
-{
1650
- tcg_gen_op3_i64(INDEX_op_add_i64, ret, arg1, arg2);
1651
-}
1652
-
1653
-static inline void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1654
-{
1655
- tcg_gen_op3_i64(INDEX_op_sub_i64, ret, arg1, arg2);
1656
-}
1657
-
1658
-static inline void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1659
-{
1660
- tcg_gen_op3_i64(INDEX_op_and_i64, ret, arg1, arg2);
1661
-}
1662
-
1663
-static inline void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1664
-{
1665
- tcg_gen_op3_i64(INDEX_op_or_i64, ret, arg1, arg2);
1666
-}
1667
-
1668
-static inline void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1669
-{
1670
- tcg_gen_op3_i64(INDEX_op_xor_i64, ret, arg1, arg2);
1671
-}
1672
-
1673
-static inline void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1674
-{
1675
- tcg_gen_op3_i64(INDEX_op_shl_i64, ret, arg1, arg2);
1676
-}
1677
-
1678
-static inline void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1679
-{
1680
- tcg_gen_op3_i64(INDEX_op_shr_i64, ret, arg1, arg2);
1681
-}
1682
-
1683
-static inline void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1684
-{
1685
- tcg_gen_op3_i64(INDEX_op_sar_i64, ret, arg1, arg2);
1686
-}
1687
-
1688
-static inline void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1689
-{
1690
- tcg_gen_op3_i64(INDEX_op_mul_i64, ret, arg1, arg2);
1691
-}
1692
-#else /* TCG_TARGET_REG_BITS == 32 */
1693
-void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
1694
-void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
1695
-void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
1696
-
1697
-void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1698
-void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1699
-
1700
-void tcg_gen_discard_i64(TCGv_i64 arg);
1701
-void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg);
1702
-void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
1703
-void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
1704
-void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
1705
-void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
1706
-void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
1707
-void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
1708
-void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset);
1709
-void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset);
1710
-void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1711
-void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1712
-void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1713
-void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1714
-void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1715
-void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1716
-void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
1717
-#endif /* TCG_TARGET_REG_BITS */
1718
-
1719
-static inline void tcg_gen_neg_i64(TCGv_i64 ret, TCGv_i64 arg)
1720
-{
1721
- if (TCG_TARGET_HAS_neg_i64) {
1722
- tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg);
1723
- } else {
1724
- tcg_gen_subfi_i64(ret, 0, arg);
1725
- }
1726
-}
1727
-
1728
-/* Size changing operations. */
1729
-
1730
-void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
1731
-void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg);
1732
-void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high);
1733
-void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg);
1734
-void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg);
1735
-void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg);
1736
-void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg);
1737
-
1738
-void tcg_gen_mov_i128(TCGv_i128 dst, TCGv_i128 src);
1739
-void tcg_gen_extr_i128_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i128 arg);
1740
-void tcg_gen_concat_i64_i128(TCGv_i128 ret, TCGv_i64 lo, TCGv_i64 hi);
1741
-
1742
-static inline void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi)
1743
-{
1744
- tcg_gen_deposit_i64(ret, lo, hi, 32, 32);
1745
-}
1746
-
1747
-/* QEMU specific operations. */
1748
+#include "tcg/tcg-op-common.h"
1749
1750
#ifndef TARGET_LONG_BITS
1751
#error must include QEMU headers
1752
@@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1,
1753
# error "Unhandled number of operands to insn_start"
1754
#endif
1755
1756
-/**
1757
- * tcg_gen_exit_tb() - output exit_tb TCG operation
1758
- * @tb: The TranslationBlock from which we are exiting
1759
- * @idx: Direct jump slot index, or exit request
1760
- *
1761
- * See tcg/README for more info about this TCG operation.
1762
- * See also tcg.h and the block comment above TB_EXIT_MASK.
1763
- *
1764
- * For a normal exit from the TB, back to the main loop, @tb should
1765
- * be NULL and @idx should be 0. Otherwise, @tb should be valid and
1766
- * @idx should be one of the TB_EXIT_ values.
1767
- */
1768
-void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx);
1769
-
1770
-/**
1771
- * tcg_gen_goto_tb() - output goto_tb TCG operation
1772
- * @idx: Direct jump slot index (0 or 1)
1773
- *
1774
- * See tcg/README for more info about this TCG operation.
1775
- *
1776
- * NOTE: In softmmu emulation, direct jumps with goto_tb are only safe within
1777
- * the pages this TB resides in because we don't take care of direct jumps when
1778
- * address mapping changes, e.g. in tlb_flush(). In user mode, there's only a
1779
- * static address translation, so the destination address is always valid, TBs
1780
- * are always invalidated properly, and direct jumps are reset when mapping
1781
- * changes.
1782
- */
1783
-void tcg_gen_goto_tb(unsigned idx);
1784
-
1785
-/**
1786
- * tcg_gen_lookup_and_goto_ptr() - look up the current TB, jump to it if valid
1787
- * @addr: Guest address of the target TB
1788
- *
1789
- * If the TB is not valid, jump to the epilogue.
1790
- *
1791
- * This operation is optional. If the TCG backend does not implement goto_ptr,
1792
- * this op is equivalent to calling tcg_gen_exit_tb() with 0 as the argument.
1793
- */
1794
-void tcg_gen_lookup_and_goto_ptr(void);
1795
-
1796
-static inline void tcg_gen_plugin_cb_start(unsigned from, unsigned type,
1797
- unsigned wr)
1798
-{
1799
- tcg_gen_op3(INDEX_op_plugin_cb_start, from, type, wr);
1800
-}
1801
-
1802
-static inline void tcg_gen_plugin_cb_end(void)
1803
-{
1804
- tcg_emit_op(INDEX_op_plugin_cb_end, 0);
1805
-}
1806
-
1807
#if TARGET_LONG_BITS == 32
1808
typedef TCGv_i32 TCGv;
1809
#define tcg_temp_new() tcg_temp_new_i32()
1810
@@ -XXX,XX +XXX,XX @@ typedef TCGv_i64 TCGv;
1811
#error Unhandled TARGET_LONG_BITS value
1812
#endif
1813
1814
-void tcg_gen_qemu_ld_i32_chk(TCGv_i32, TCGTemp *, TCGArg, MemOp, TCGType);
1815
-void tcg_gen_qemu_st_i32_chk(TCGv_i32, TCGTemp *, TCGArg, MemOp, TCGType);
1816
-void tcg_gen_qemu_ld_i64_chk(TCGv_i64, TCGTemp *, TCGArg, MemOp, TCGType);
1817
-void tcg_gen_qemu_st_i64_chk(TCGv_i64, TCGTemp *, TCGArg, MemOp, TCGType);
1818
-void tcg_gen_qemu_ld_i128_chk(TCGv_i128, TCGTemp *, TCGArg, MemOp, TCGType);
1819
-void tcg_gen_qemu_st_i128_chk(TCGv_i128, TCGTemp *, TCGArg, MemOp, TCGType);
1820
-
1821
static inline void
1822
tcg_gen_qemu_ld_i32(TCGv_i32 v, TCGv a, TCGArg i, MemOp m)
1823
{
1824
@@ -XXX,XX +XXX,XX @@ tcg_gen_qemu_st_i128(TCGv_i128 v, TCGv a, TCGArg i, MemOp m)
1825
tcg_gen_qemu_st_i128_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL);
1826
}
1827
1828
-void tcg_gen_atomic_cmpxchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, TCGv_i32,
1829
- TCGArg, MemOp, TCGType);
1830
-void tcg_gen_atomic_cmpxchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, TCGv_i64,
1831
- TCGArg, MemOp, TCGType);
1832
-void tcg_gen_atomic_cmpxchg_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128,
1833
- TCGv_i128, TCGArg, MemOp, TCGType);
1834
-
1835
-void tcg_gen_nonatomic_cmpxchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, TCGv_i32,
1836
- TCGArg, MemOp, TCGType);
1837
-void tcg_gen_nonatomic_cmpxchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, TCGv_i64,
1838
- TCGArg, MemOp, TCGType);
1839
-void tcg_gen_nonatomic_cmpxchg_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128,
1840
- TCGv_i128, TCGArg, MemOp, TCGType);
1841
-
1842
-void tcg_gen_atomic_xchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1843
- TCGArg, MemOp, TCGType);
1844
-void tcg_gen_atomic_xchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1845
- TCGArg, MemOp, TCGType);
1846
-
1847
-void tcg_gen_atomic_fetch_add_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1848
- TCGArg, MemOp, TCGType);
1849
-void tcg_gen_atomic_fetch_add_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1850
- TCGArg, MemOp, TCGType);
1851
-void tcg_gen_atomic_fetch_and_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1852
- TCGArg, MemOp, TCGType);
1853
-void tcg_gen_atomic_fetch_and_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1854
- TCGArg, MemOp, TCGType);
1855
-void tcg_gen_atomic_fetch_or_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1856
- TCGArg, MemOp, TCGType);
1857
-void tcg_gen_atomic_fetch_or_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1858
- TCGArg, MemOp, TCGType);
1859
-void tcg_gen_atomic_fetch_xor_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1860
- TCGArg, MemOp, TCGType);
1861
-void tcg_gen_atomic_fetch_xor_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1862
- TCGArg, MemOp, TCGType);
1863
-void tcg_gen_atomic_fetch_smin_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1864
- TCGArg, MemOp, TCGType);
1865
-void tcg_gen_atomic_fetch_smin_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1866
- TCGArg, MemOp, TCGType);
1867
-void tcg_gen_atomic_fetch_umin_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1868
- TCGArg, MemOp, TCGType);
1869
-void tcg_gen_atomic_fetch_umin_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1870
- TCGArg, MemOp, TCGType);
1871
-void tcg_gen_atomic_fetch_smax_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1872
- TCGArg, MemOp, TCGType);
1873
-void tcg_gen_atomic_fetch_smax_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1874
- TCGArg, MemOp, TCGType);
1875
-void tcg_gen_atomic_fetch_umax_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1876
- TCGArg, MemOp, TCGType);
1877
-void tcg_gen_atomic_fetch_umax_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1878
- TCGArg, MemOp, TCGType);
1879
-
1880
-void tcg_gen_atomic_add_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1881
- TCGArg, MemOp, TCGType);
1882
-void tcg_gen_atomic_add_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1883
- TCGArg, MemOp, TCGType);
1884
-void tcg_gen_atomic_and_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1885
- TCGArg, MemOp, TCGType);
1886
-void tcg_gen_atomic_and_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1887
- TCGArg, MemOp, TCGType);
1888
-void tcg_gen_atomic_or_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1889
- TCGArg, MemOp, TCGType);
1890
-void tcg_gen_atomic_or_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1891
- TCGArg, MemOp, TCGType);
1892
-void tcg_gen_atomic_xor_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1893
- TCGArg, MemOp, TCGType);
1894
-void tcg_gen_atomic_xor_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1895
- TCGArg, MemOp, TCGType);
1896
-void tcg_gen_atomic_smin_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1897
- TCGArg, MemOp, TCGType);
1898
-void tcg_gen_atomic_smin_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1899
- TCGArg, MemOp, TCGType);
1900
-void tcg_gen_atomic_umin_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1901
- TCGArg, MemOp, TCGType);
1902
-void tcg_gen_atomic_umin_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1903
- TCGArg, MemOp, TCGType);
1904
-void tcg_gen_atomic_smax_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1905
- TCGArg, MemOp, TCGType);
1906
-void tcg_gen_atomic_smax_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1907
- TCGArg, MemOp, TCGType);
1908
-void tcg_gen_atomic_umax_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32,
1909
- TCGArg, MemOp, TCGType);
1910
-void tcg_gen_atomic_umax_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64,
1911
- TCGArg, MemOp, TCGType);
1912
-
1913
#define DEF_ATOMIC2(N, S) \
1914
static inline void N##_##S(TCGv_##S r, TCGv a, TCGv_##S v, \
1915
TCGArg i, MemOp m) \
1916
@@ -XXX,XX +XXX,XX @@ DEF_ATOMIC2(tcg_gen_atomic_umax_fetch, i64)
1917
#undef DEF_ATOMIC2
1918
#undef DEF_ATOMIC3
1919
1920
-void tcg_gen_mov_vec(TCGv_vec, TCGv_vec);
1921
-void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec, TCGv_i32);
1922
-void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec, TCGv_i64);
1923
-void tcg_gen_dup_mem_vec(unsigned vece, TCGv_vec, TCGv_ptr, tcg_target_long);
1924
-void tcg_gen_dupi_vec(unsigned vece, TCGv_vec, uint64_t);
1925
-void tcg_gen_add_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1926
-void tcg_gen_sub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1927
-void tcg_gen_mul_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1928
-void tcg_gen_and_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1929
-void tcg_gen_or_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1930
-void tcg_gen_xor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1931
-void tcg_gen_andc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1932
-void tcg_gen_orc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1933
-void tcg_gen_nand_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1934
-void tcg_gen_nor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1935
-void tcg_gen_eqv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1936
-void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
1937
-void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
1938
-void tcg_gen_abs_vec(unsigned vece, TCGv_vec r, TCGv_vec a);
1939
-void tcg_gen_ssadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1940
-void tcg_gen_usadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1941
-void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1942
-void tcg_gen_ussub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1943
-void tcg_gen_smin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1944
-void tcg_gen_umin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1945
-void tcg_gen_smax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1946
-void tcg_gen_umax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b);
1947
-
1948
-void tcg_gen_shli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
1949
-void tcg_gen_shri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
1950
-void tcg_gen_sari_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
1951
-void tcg_gen_rotli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
1952
-void tcg_gen_rotri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i);
1953
-
1954
-void tcg_gen_shls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
1955
-void tcg_gen_shrs_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
1956
-void tcg_gen_sars_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
1957
-void tcg_gen_rotls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 s);
1958
-
1959
-void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
1960
-void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
1961
-void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
1962
-void tcg_gen_rotlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
1963
-void tcg_gen_rotrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec s);
1964
-
1965
-void tcg_gen_cmp_vec(TCGCond cond, unsigned vece, TCGv_vec r,
1966
- TCGv_vec a, TCGv_vec b);
1967
-
1968
-void tcg_gen_bitsel_vec(unsigned vece, TCGv_vec r, TCGv_vec a,
1969
- TCGv_vec b, TCGv_vec c);
1970
-void tcg_gen_cmpsel_vec(TCGCond cond, unsigned vece, TCGv_vec r,
1971
- TCGv_vec a, TCGv_vec b, TCGv_vec c, TCGv_vec d);
1972
-
1973
-void tcg_gen_ld_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset);
1974
-void tcg_gen_st_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset);
1975
-void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
1976
-
1977
#if TARGET_LONG_BITS == 64
1978
#define tcg_gen_movi_tl tcg_gen_movi_i64
1979
#define tcg_gen_mov_tl tcg_gen_mov_i64
1980
@@ -XXX,XX +XXX,XX @@ void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr base, TCGArg offset, TCGType t);
1981
: (VECE) == MO_32 ? 0x00000001ul * (uint32_t)(C) \
1982
: (qemu_build_not_reached_always(), 0)) \
1983
: (target_long)dup_const(VECE, C))
1984
-#endif
1985
-
1986
-#if UINTPTR_MAX == UINT32_MAX
1987
-# define PTR i32
1988
-# define NAT TCGv_i32
1989
-#else
1990
-# define PTR i64
1991
-# define NAT TCGv_i64
1992
-#endif
1993
-
1994
-static inline void tcg_gen_ld_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t o)
1995
-{
1996
- glue(tcg_gen_ld_,PTR)((NAT)r, a, o);
1997
-}
1998
-
1999
-static inline void tcg_gen_st_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t o)
2000
-{
2001
- glue(tcg_gen_st_, PTR)((NAT)r, a, o);
2002
-}
2003
-
2004
-static inline void tcg_gen_discard_ptr(TCGv_ptr a)
2005
-{
2006
- glue(tcg_gen_discard_,PTR)((NAT)a);
2007
-}
2008
-
2009
-static inline void tcg_gen_add_ptr(TCGv_ptr r, TCGv_ptr a, TCGv_ptr b)
2010
-{
2011
- glue(tcg_gen_add_,PTR)((NAT)r, (NAT)a, (NAT)b);
2012
-}
2013
-
2014
-static inline void tcg_gen_addi_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t b)
2015
-{
2016
- glue(tcg_gen_addi_,PTR)((NAT)r, (NAT)a, b);
2017
-}
2018
-
2019
-static inline void tcg_gen_mov_ptr(TCGv_ptr d, TCGv_ptr s)
2020
-{
2021
- glue(tcg_gen_mov_,PTR)((NAT)d, (NAT)s);
2022
-}
2023
-
2024
-static inline void tcg_gen_movi_ptr(TCGv_ptr d, intptr_t s)
2025
-{
2026
- glue(tcg_gen_movi_,PTR)((NAT)d, s);
2027
-}
2028
-
2029
-static inline void tcg_gen_brcondi_ptr(TCGCond cond, TCGv_ptr a,
2030
- intptr_t b, TCGLabel *label)
2031
-{
2032
- glue(tcg_gen_brcondi_,PTR)(cond, (NAT)a, b, label);
2033
-}
2034
-
2035
-static inline void tcg_gen_ext_i32_ptr(TCGv_ptr r, TCGv_i32 a)
2036
-{
2037
-#if UINTPTR_MAX == UINT32_MAX
2038
- tcg_gen_mov_i32((NAT)r, a);
2039
-#else
2040
- tcg_gen_ext_i32_i64((NAT)r, a);
2041
-#endif
2042
-}
2043
-
2044
-static inline void tcg_gen_trunc_i64_ptr(TCGv_ptr r, TCGv_i64 a)
2045
-{
2046
-#if UINTPTR_MAX == UINT32_MAX
2047
- tcg_gen_extrl_i64_i32((NAT)r, a);
2048
-#else
2049
- tcg_gen_mov_i64((NAT)r, a);
2050
-#endif
2051
-}
2052
-
2053
-static inline void tcg_gen_extu_ptr_i64(TCGv_i64 r, TCGv_ptr a)
2054
-{
2055
-#if UINTPTR_MAX == UINT32_MAX
2056
- tcg_gen_extu_i32_i64(r, (NAT)a);
2057
-#else
2058
- tcg_gen_mov_i64(r, (NAT)a);
2059
-#endif
2060
-}
2061
-
2062
-static inline void tcg_gen_trunc_ptr_i32(TCGv_i32 r, TCGv_ptr a)
2063
-{
2064
-#if UINTPTR_MAX == UINT32_MAX
2065
- tcg_gen_mov_i32(r, (NAT)a);
2066
-#else
2067
- tcg_gen_extrl_i64_i32(r, (NAT)a);
2068
-#endif
2069
-}
2070
-
2071
-#undef PTR
2072
-#undef NAT
2073
2074
+#endif /* TARGET_LONG_BITS == 64 */
2075
#endif /* TCG_TCG_OP_H */
2076
diff --git a/tcg/optimize.c b/tcg/optimize.c
2077
index XXXXXXX..XXXXXXX 100644
2078
--- a/tcg/optimize.c
2079
+++ b/tcg/optimize.c
2080
@@ -XXX,XX +XXX,XX @@
2081
2082
#include "qemu/osdep.h"
2083
#include "qemu/int128.h"
2084
-#include "tcg/tcg-op.h"
2085
+#include "tcg/tcg-op-common.h"
2086
#include "tcg-internal.h"
2087
2088
#define CASE_OP_32_64(x) \
2089
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
2090
index XXXXXXX..XXXXXXX 100644
2091
--- a/tcg/tcg-op-gvec.c
2092
+++ b/tcg/tcg-op-gvec.c
2093
@@ -XXX,XX +XXX,XX @@
2094
#include "qemu/osdep.h"
2095
#include "tcg/tcg.h"
2096
#include "tcg/tcg-temp-internal.h"
2097
-#include "tcg/tcg-op.h"
2098
+#include "tcg/tcg-op-common.h"
2099
#include "tcg/tcg-op-gvec.h"
2100
#include "tcg/tcg-gvec-desc.h"
2101
2102
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
2103
index XXXXXXX..XXXXXXX 100644
2104
--- a/tcg/tcg-op-ldst.c
2105
+++ b/tcg/tcg-op-ldst.c
2106
@@ -XXX,XX +XXX,XX @@
2107
#include "exec/exec-all.h"
2108
#include "tcg/tcg.h"
2109
#include "tcg/tcg-temp-internal.h"
2110
-#include "tcg/tcg-op.h"
2111
+#include "tcg/tcg-op-common.h"
2112
#include "tcg/tcg-mo.h"
2113
#include "exec/plugin-gen.h"
2114
#include "tcg-internal.h"
2115
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
2116
index XXXXXXX..XXXXXXX 100644
2117
--- a/tcg/tcg-op-vec.c
2118
+++ b/tcg/tcg-op-vec.c
2119
@@ -XXX,XX +XXX,XX @@
2120
#include "qemu/osdep.h"
2121
#include "tcg/tcg.h"
2122
#include "tcg/tcg-temp-internal.h"
2123
-#include "tcg/tcg-op.h"
2124
+#include "tcg/tcg-op-common.h"
2125
#include "tcg/tcg-mo.h"
2126
#include "tcg-internal.h"
2127
2128
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
2129
index XXXXXXX..XXXXXXX 100644
2130
--- a/tcg/tcg-op.c
2131
+++ b/tcg/tcg-op.c
2132
@@ -XXX,XX +XXX,XX @@
2133
#include "exec/exec-all.h"
2134
#include "tcg/tcg.h"
2135
#include "tcg/tcg-temp-internal.h"
2136
-#include "tcg/tcg-op.h"
2137
+#include "tcg/tcg-op-common.h"
2138
#include "exec/plugin-gen.h"
2139
#include "tcg-internal.h"
2140
2141
diff --git a/tcg/tcg.c b/tcg/tcg.c
2142
index XXXXXXX..XXXXXXX 100644
2143
--- a/tcg/tcg.c
2144
+++ b/tcg/tcg.c
2145
@@ -XXX,XX +XXX,XX @@
2146
2147
#include "exec/exec-all.h"
2148
#include "exec/tlb-common.h"
2149
-#include "tcg/tcg-op.h"
2150
+#include "tcg/tcg-op-common.h"
2151
2152
#if UINTPTR_MAX == UINT32_MAX
2153
# define ELF_CLASS ELFCLASS32
2154
diff --git a/tcg/tci.c b/tcg/tci.c
2155
index XXXXXXX..XXXXXXX 100644
2156
--- a/tcg/tci.c
2157
+++ b/tcg/tci.c
2158
@@ -XXX,XX +XXX,XX @@
2159
*/
2160
2161
#include "qemu/osdep.h"
2162
-#include "exec/cpu_ldst.h"
2163
-#include "tcg/tcg-op.h"
2164
+#include "tcg/tcg.h"
2165
#include "tcg/tcg-ldst.h"
2166
#include <ffi.h>
2167
2168
--
2169
2.34.1
2170
2171
diff view generated by jsdifflib
New patch
1
This had been included via tcg-op-common.h via tcg-op.h,
2
but that is going away.
1
3
4
It is needed for inlines within translator.h, so we might as well
5
do it there and not individually in each translator c file.
6
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
10
target/arm/tcg/translate.h | 1 +
11
target/arm/tcg/translate-a64.c | 2 --
12
target/arm/tcg/translate-sme.c | 1 -
13
target/arm/tcg/translate-sve.c | 2 --
14
target/arm/tcg/translate.c | 2 --
15
5 files changed, 1 insertion(+), 7 deletions(-)
16
17
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/arm/tcg/translate.h
20
+++ b/target/arm/tcg/translate.h
21
@@ -XXX,XX +XXX,XX @@
22
#define TARGET_ARM_TRANSLATE_H
23
24
#include "exec/translator.h"
25
+#include "exec/helper-gen.h"
26
#include "internals.h"
27
28
29
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/arm/tcg/translate-a64.c
32
+++ b/target/arm/tcg/translate-a64.c
33
@@ -XXX,XX +XXX,XX @@
34
#include "qemu/host-utils.h"
35
#include "semihosting/semihost.h"
36
#include "exec/gen-icount.h"
37
-#include "exec/helper-proto.h"
38
-#include "exec/helper-gen.h"
39
#include "exec/log.h"
40
#include "cpregs.h"
41
#include "translate-a64.h"
42
diff --git a/target/arm/tcg/translate-sme.c b/target/arm/tcg/translate-sme.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/arm/tcg/translate-sme.c
45
+++ b/target/arm/tcg/translate-sme.c
46
@@ -XXX,XX +XXX,XX @@
47
#include "tcg/tcg-op-gvec.h"
48
#include "tcg/tcg-gvec-desc.h"
49
#include "translate.h"
50
-#include "exec/helper-gen.h"
51
#include "translate-a64.h"
52
#include "fpu/softfloat.h"
53
54
diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/target/arm/tcg/translate-sve.c
57
+++ b/target/arm/tcg/translate-sve.c
58
@@ -XXX,XX +XXX,XX @@
59
#include "arm_ldst.h"
60
#include "translate.h"
61
#include "internals.h"
62
-#include "exec/helper-proto.h"
63
-#include "exec/helper-gen.h"
64
#include "exec/log.h"
65
#include "translate-a64.h"
66
#include "fpu/softfloat.h"
67
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
68
index XXXXXXX..XXXXXXX 100644
69
--- a/target/arm/tcg/translate.c
70
+++ b/target/arm/tcg/translate.c
71
@@ -XXX,XX +XXX,XX @@
72
#include "qemu/bitops.h"
73
#include "arm_ldst.h"
74
#include "semihosting/semihost.h"
75
-#include "exec/helper-proto.h"
76
-#include "exec/helper-gen.h"
77
#include "exec/log.h"
78
#include "cpregs.h"
79
80
--
81
2.34.1
82
83
diff view generated by jsdifflib
New patch
1
This had been included via tcg-op-common.h via tcg-op.h,
2
but that is going away. In idef-parser.y, shuffle some
3
tcg related includes into a more logical order.
1
4
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
target/hexagon/genptr.c | 1 +
9
target/hexagon/translate.c | 1 +
10
target/hexagon/idef-parser/idef-parser.y | 3 ++-
11
3 files changed, 4 insertions(+), 1 deletion(-)
12
13
diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/hexagon/genptr.c
16
+++ b/target/hexagon/genptr.c
17
@@ -XXX,XX +XXX,XX @@
18
#include "internal.h"
19
#include "tcg/tcg-op.h"
20
#include "tcg/tcg-op-gvec.h"
21
+#include "exec/helper-gen.h"
22
#include "insn.h"
23
#include "opcodes.h"
24
#include "translate.h"
25
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/hexagon/translate.c
28
+++ b/target/hexagon/translate.c
29
@@ -XXX,XX +XXX,XX @@
30
#include "cpu.h"
31
#include "tcg/tcg-op.h"
32
#include "tcg/tcg-op-gvec.h"
33
+#include "exec/helper-gen.h"
34
#include "exec/cpu_ldst.h"
35
#include "exec/log.h"
36
#include "internal.h"
37
diff --git a/target/hexagon/idef-parser/idef-parser.y b/target/hexagon/idef-parser/idef-parser.y
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/hexagon/idef-parser/idef-parser.y
40
+++ b/target/hexagon/idef-parser/idef-parser.y
41
@@ -XXX,XX +XXX,XX @@ int main(int argc, char **argv)
42
fputs("#include \"qemu/log.h\"\n", output_file);
43
fputs("#include \"cpu.h\"\n", output_file);
44
fputs("#include \"internal.h\"\n", output_file);
45
+ fputs("#include \"tcg/tcg.h\"\n", output_file);
46
fputs("#include \"tcg/tcg-op.h\"\n", output_file);
47
+ fputs("#include \"exec/helper-gen.h\"\n", output_file);
48
fputs("#include \"insn.h\"\n", output_file);
49
fputs("#include \"opcodes.h\"\n", output_file);
50
fputs("#include \"translate.h\"\n", output_file);
51
fputs("#define QEMU_GENERATE\n", output_file);
52
fputs("#include \"genptr.h\"\n", output_file);
53
- fputs("#include \"tcg/tcg.h\"\n", output_file);
54
fputs("#include \"macros.h\"\n", output_file);
55
fprintf(output_file, "#include \"%s\"\n", argv[ARG_INDEX_EMITTER_H]);
56
57
--
58
2.34.1
59
60
diff view generated by jsdifflib
New patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
include/exec/helper-head.h | 18 +++---------------
5
1 file changed, 3 insertions(+), 15 deletions(-)
1
6
7
diff --git a/include/exec/helper-head.h b/include/exec/helper-head.h
8
index XXXXXXX..XXXXXXX 100644
9
--- a/include/exec/helper-head.h
10
+++ b/include/exec/helper-head.h
11
@@ -XXX,XX +XXX,XX @@
12
-/* Helper file for declaring TCG helper functions.
13
- Used by other helper files.
14
-
15
- Targets should use DEF_HELPER_N and DEF_HELPER_FLAGS_N to declare helper
16
- functions. Names should be specified without the helper_ prefix, and
17
- the return and argument types specified. 3 basic types are understood
18
- (i32, i64 and ptr). Additional aliases are provided for convenience and
19
- to match the types used by the C helper implementation.
20
-
21
- The target helper.h should be included in all files that use/define
22
- helper functions. THis will ensure that function prototypes are
23
- consistent. In addition it should be included an extra two times for
24
- helper.c, defining:
25
- GEN_HELPER 1 to produce op generation functions (gen_helper_*)
26
- GEN_HELPER 2 to do runtime registration helper functions.
27
+/*
28
+ * Helper file for declaring TCG helper functions.
29
+ * Used by other helper files.
30
*/
31
32
#ifndef EXEC_HELPER_HEAD_H
33
--
34
2.34.1
35
36
diff view generated by jsdifflib
1
Wrap the bare TranslationBlock pointer into a structure.
1
This will be required outside of tcg-internal.h soon.
2
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
accel/tcg/tb-hash.h | 1 +
6
include/tcg/helper-info.h | 59 +++++++++++++++++++++++++++++++++++++++
8
accel/tcg/tb-jmp-cache.h | 24 ++++++++++++++++++++++++
7
tcg/tcg-internal.h | 47 +------------------------------
9
include/exec/cpu-common.h | 1 +
8
2 files changed, 60 insertions(+), 46 deletions(-)
10
include/hw/core/cpu.h | 15 +--------------
9
create mode 100644 include/tcg/helper-info.h
11
include/qemu/typedefs.h | 1 +
12
accel/stubs/tcg-stub.c | 4 ++++
13
accel/tcg/cpu-exec.c | 10 +++++++---
14
accel/tcg/cputlb.c | 9 +++++----
15
accel/tcg/translate-all.c | 28 +++++++++++++++++++++++++---
16
hw/core/cpu-common.c | 3 +--
17
plugins/core.c | 2 +-
18
trace/control-target.c | 2 +-
19
12 files changed, 72 insertions(+), 28 deletions(-)
20
create mode 100644 accel/tcg/tb-jmp-cache.h
21
10
22
diff --git a/accel/tcg/tb-hash.h b/accel/tcg/tb-hash.h
11
diff --git a/include/tcg/helper-info.h b/include/tcg/helper-info.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/accel/tcg/tb-hash.h
25
+++ b/accel/tcg/tb-hash.h
26
@@ -XXX,XX +XXX,XX @@
27
#include "exec/cpu-defs.h"
28
#include "exec/exec-all.h"
29
#include "qemu/xxhash.h"
30
+#include "tb-jmp-cache.h"
31
32
#ifdef CONFIG_SOFTMMU
33
34
diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h
35
new file mode 100644
12
new file mode 100644
36
index XXXXXXX..XXXXXXX
13
index XXXXXXX..XXXXXXX
37
--- /dev/null
14
--- /dev/null
38
+++ b/accel/tcg/tb-jmp-cache.h
15
+++ b/include/tcg/helper-info.h
39
@@ -XXX,XX +XXX,XX @@
16
@@ -XXX,XX +XXX,XX @@
40
+/*
17
+/*
41
+ * The per-CPU TranslationBlock jump cache.
18
+ * TCG Helper Infomation Structure
42
+ *
19
+ *
43
+ * Copyright (c) 2003 Fabrice Bellard
20
+ * Copyright (c) 2023 Linaro Ltd
44
+ *
21
+ *
45
+ * SPDX-License-Identifier: GPL-2.0-or-later
22
+ * SPDX-License-Identifier: GPL-2.0-or-later
46
+ */
23
+ */
47
+
24
+
48
+#ifndef ACCEL_TCG_TB_JMP_CACHE_H
25
+#ifndef TCG_HELPER_INFO_H
49
+#define ACCEL_TCG_TB_JMP_CACHE_H
26
+#define TCG_HELPER_INFO_H
50
+
27
+
51
+#define TB_JMP_CACHE_BITS 12
28
+#ifdef CONFIG_TCG_INTERPRETER
52
+#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
29
+#include <ffi.h>
30
+#endif
53
+
31
+
54
+/*
32
+/*
55
+ * Accessed in parallel; all accesses to 'tb' must be atomic.
33
+ * Describe the calling convention of a given argument type.
56
+ */
34
+ */
57
+struct CPUJumpCache {
35
+typedef enum {
58
+ struct {
36
+ TCG_CALL_RET_NORMAL, /* by registers */
59
+ TranslationBlock *tb;
37
+ TCG_CALL_RET_BY_REF, /* for i128, by reference */
60
+ } array[TB_JMP_CACHE_SIZE];
38
+ TCG_CALL_RET_BY_VEC, /* for i128, by vector register */
61
+};
39
+} TCGCallReturnKind;
62
+
40
+
63
+#endif /* ACCEL_TCG_TB_JMP_CACHE_H */
41
+typedef enum {
64
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
42
+ TCG_CALL_ARG_NORMAL, /* by registers (continuing onto stack) */
43
+ TCG_CALL_ARG_EVEN, /* like normal, but skipping odd slots */
44
+ TCG_CALL_ARG_EXTEND, /* for i32, as a sign/zero-extended i64 */
45
+ TCG_CALL_ARG_EXTEND_U, /* ... as a zero-extended i64 */
46
+ TCG_CALL_ARG_EXTEND_S, /* ... as a sign-extended i64 */
47
+ TCG_CALL_ARG_BY_REF, /* for i128, by reference, first */
48
+ TCG_CALL_ARG_BY_REF_N, /* ... by reference, subsequent */
49
+} TCGCallArgumentKind;
50
+
51
+typedef struct TCGCallArgumentLoc {
52
+ TCGCallArgumentKind kind : 8;
53
+ unsigned arg_slot : 8;
54
+ unsigned ref_slot : 8;
55
+ unsigned arg_idx : 4;
56
+ unsigned tmp_subindex : 2;
57
+} TCGCallArgumentLoc;
58
+
59
+typedef struct TCGHelperInfo {
60
+ void *func;
61
+ const char *name;
62
+#ifdef CONFIG_TCG_INTERPRETER
63
+ ffi_cif *cif;
64
+#endif
65
+ unsigned typemask : 32;
66
+ unsigned flags : 8;
67
+ unsigned nr_in : 8;
68
+ unsigned nr_out : 8;
69
+ TCGCallReturnKind out_kind : 8;
70
+
71
+ /* Maximum physical arguments are constrained by TCG_TYPE_I128. */
72
+ TCGCallArgumentLoc in[MAX_CALL_IARGS * (128 / TCG_TARGET_REG_BITS)];
73
+} TCGHelperInfo;
74
+
75
+#endif /* TCG_HELPER_INFO_H */
76
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
65
index XXXXXXX..XXXXXXX 100644
77
index XXXXXXX..XXXXXXX 100644
66
--- a/include/exec/cpu-common.h
78
--- a/tcg/tcg-internal.h
67
+++ b/include/exec/cpu-common.h
79
+++ b/tcg/tcg-internal.h
68
@@ -XXX,XX +XXX,XX @@ void cpu_list_unlock(void);
80
@@ -XXX,XX +XXX,XX @@
69
unsigned int cpu_list_generation_id_get(void);
81
#ifndef TCG_INTERNAL_H
70
82
#define TCG_INTERNAL_H
71
void tcg_flush_softmmu_tlb(CPUState *cs);
83
72
+void tcg_flush_jmp_cache(CPUState *cs);
84
-#ifdef CONFIG_TCG_INTERPRETER
73
85
-#include <ffi.h>
74
void tcg_iommu_init_notifier_list(CPUState *cpu);
86
-#endif
75
void tcg_iommu_free_notifier_list(CPUState *cpu);
87
+#include "tcg/helper-info.h"
76
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
88
77
index XXXXXXX..XXXXXXX 100644
89
#define TCG_HIGHWATER 1024
78
--- a/include/hw/core/cpu.h
90
79
+++ b/include/hw/core/cpu.h
91
-/*
80
@@ -XXX,XX +XXX,XX @@ struct kvm_run;
92
- * Describe the calling convention of a given argument type.
81
struct hax_vcpu_state;
93
- */
82
struct hvf_vcpu_state;
94
-typedef enum {
83
95
- TCG_CALL_RET_NORMAL, /* by registers */
84
-#define TB_JMP_CACHE_BITS 12
96
- TCG_CALL_RET_BY_REF, /* for i128, by reference */
85
-#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
97
- TCG_CALL_RET_BY_VEC, /* for i128, by vector register */
98
-} TCGCallReturnKind;
86
-
99
-
87
/* work queue */
100
-typedef enum {
88
101
- TCG_CALL_ARG_NORMAL, /* by registers (continuing onto stack) */
89
/* The union type allows passing of 64 bit target pointers on 32 bit
102
- TCG_CALL_ARG_EVEN, /* like normal, but skipping odd slots */
90
@@ -XXX,XX +XXX,XX @@ struct CPUState {
103
- TCG_CALL_ARG_EXTEND, /* for i32, as a sign/zero-extended i64 */
91
CPUArchState *env_ptr;
104
- TCG_CALL_ARG_EXTEND_U, /* ... as a zero-extended i64 */
92
IcountDecr *icount_decr_ptr;
105
- TCG_CALL_ARG_EXTEND_S, /* ... as a sign-extended i64 */
93
106
- TCG_CALL_ARG_BY_REF, /* for i128, by reference, first */
94
- /* Accessed in parallel; all accesses must be atomic */
107
- TCG_CALL_ARG_BY_REF_N, /* ... by reference, subsequent */
95
- TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
108
-} TCGCallArgumentKind;
96
+ CPUJumpCache *tb_jmp_cache;
97
98
struct GDBRegisterState *gdb_regs;
99
int gdb_num_regs;
100
@@ -XXX,XX +XXX,XX @@ extern CPUTailQ cpus;
101
102
extern __thread CPUState *current_cpu;
103
104
-static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
105
-{
106
- unsigned int i;
107
-
109
-
108
- for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
110
-typedef struct TCGCallArgumentLoc {
109
- qatomic_set(&cpu->tb_jmp_cache[i], NULL);
111
- TCGCallArgumentKind kind : 8;
110
- }
112
- unsigned arg_slot : 8;
111
-}
113
- unsigned ref_slot : 8;
114
- unsigned arg_idx : 4;
115
- unsigned tmp_subindex : 2;
116
-} TCGCallArgumentLoc;
112
-
117
-
113
/**
118
-typedef struct TCGHelperInfo {
114
* qemu_tcg_mttcg_enabled:
119
- void *func;
115
* Check whether we are running MultiThread TCG or not.
120
- const char *name;
116
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
121
-#ifdef CONFIG_TCG_INTERPRETER
117
index XXXXXXX..XXXXXXX 100644
122
- ffi_cif *cif;
118
--- a/include/qemu/typedefs.h
123
-#endif
119
+++ b/include/qemu/typedefs.h
124
- unsigned typemask : 32;
120
@@ -XXX,XX +XXX,XX @@ typedef struct CoMutex CoMutex;
125
- unsigned flags : 8;
121
typedef struct ConfidentialGuestSupport ConfidentialGuestSupport;
126
- unsigned nr_in : 8;
122
typedef struct CPUAddressSpace CPUAddressSpace;
127
- unsigned nr_out : 8;
123
typedef struct CPUArchState CPUArchState;
128
- TCGCallReturnKind out_kind : 8;
124
+typedef struct CPUJumpCache CPUJumpCache;
125
typedef struct CPUState CPUState;
126
typedef struct CPUTLBEntryFull CPUTLBEntryFull;
127
typedef struct DeviceListener DeviceListener;
128
diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c
129
index XXXXXXX..XXXXXXX 100644
130
--- a/accel/stubs/tcg-stub.c
131
+++ b/accel/stubs/tcg-stub.c
132
@@ -XXX,XX +XXX,XX @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
133
{
134
}
135
136
+void tcg_flush_jmp_cache(CPUState *cpu)
137
+{
138
+}
139
+
140
int probe_access_flags(CPUArchState *env, target_ulong addr,
141
MMUAccessType access_type, int mmu_idx,
142
bool nonfault, void **phost, uintptr_t retaddr)
143
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
144
index XXXXXXX..XXXXXXX 100644
145
--- a/accel/tcg/cpu-exec.c
146
+++ b/accel/tcg/cpu-exec.c
147
@@ -XXX,XX +XXX,XX @@
148
#include "sysemu/replay.h"
149
#include "sysemu/tcg.h"
150
#include "exec/helper-proto.h"
151
+#include "tb-jmp-cache.h"
152
#include "tb-hash.h"
153
#include "tb-context.h"
154
#include "internal.h"
155
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
156
tcg_debug_assert(!(cflags & CF_INVALID));
157
158
hash = tb_jmp_cache_hash_func(pc);
159
- tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
160
+ tb = qatomic_rcu_read(&cpu->tb_jmp_cache->array[hash].tb);
161
162
if (likely(tb &&
163
tb->pc == pc &&
164
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
165
if (tb == NULL) {
166
return NULL;
167
}
168
- qatomic_set(&cpu->tb_jmp_cache[hash], tb);
169
+ qatomic_set(&cpu->tb_jmp_cache->array[hash].tb, tb);
170
return tb;
171
}
172
173
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
174
175
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
176
if (tb == NULL) {
177
+ uint32_t h;
178
+
179
mmap_lock();
180
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
181
mmap_unlock();
182
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
183
* We add the TB in the virtual pc hash table
184
* for the fast lookup
185
*/
186
- qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
187
+ h = tb_jmp_cache_hash_func(pc);
188
+ qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
189
}
190
191
#ifndef CONFIG_USER_ONLY
192
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
193
index XXXXXXX..XXXXXXX 100644
194
--- a/accel/tcg/cputlb.c
195
+++ b/accel/tcg/cputlb.c
196
@@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
197
198
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
199
{
200
- unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
201
+ int i, i0 = tb_jmp_cache_hash_page(page_addr);
202
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
203
204
for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
205
- qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
206
+ qatomic_set(&jc->array[i0 + i].tb, NULL);
207
}
208
}
209
210
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
211
212
qemu_spin_unlock(&env_tlb(env)->c.lock);
213
214
- cpu_tb_jmp_cache_clear(cpu);
215
+ tcg_flush_jmp_cache(cpu);
216
217
if (to_clean == ALL_MMUIDX_BITS) {
218
qatomic_set(&env_tlb(env)->c.full_flush_count,
219
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
220
* longer to clear each entry individually than it will to clear it all.
221
*/
222
if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
223
- cpu_tb_jmp_cache_clear(cpu);
224
+ tcg_flush_jmp_cache(cpu);
225
return;
226
}
227
228
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
229
index XXXXXXX..XXXXXXX 100644
230
--- a/accel/tcg/translate-all.c
231
+++ b/accel/tcg/translate-all.c
232
@@ -XXX,XX +XXX,XX @@
233
#include "sysemu/tcg.h"
234
#include "qapi/error.h"
235
#include "hw/core/tcg-cpu-ops.h"
236
+#include "tb-jmp-cache.h"
237
#include "tb-hash.h"
238
#include "tb-context.h"
239
#include "internal.h"
240
@@ -XXX,XX +XXX,XX @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
241
}
242
243
CPU_FOREACH(cpu) {
244
- cpu_tb_jmp_cache_clear(cpu);
245
+ tcg_flush_jmp_cache(cpu);
246
}
247
248
qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
249
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
250
/* remove the TB from the hash list */
251
h = tb_jmp_cache_hash_func(tb->pc);
252
CPU_FOREACH(cpu) {
253
- if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
254
- qatomic_set(&cpu->tb_jmp_cache[h], NULL);
255
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
256
+ if (qatomic_read(&jc->array[h].tb) == tb) {
257
+ qatomic_set(&jc->array[h].tb, NULL);
258
}
259
}
260
261
@@ -XXX,XX +XXX,XX @@ int page_unprotect(target_ulong address, uintptr_t pc)
262
}
263
#endif /* CONFIG_USER_ONLY */
264
265
+/*
266
+ * Called by generic code at e.g. cpu reset after cpu creation,
267
+ * therefore we must be prepared to allocate the jump cache.
268
+ */
269
+void tcg_flush_jmp_cache(CPUState *cpu)
270
+{
271
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
272
+
273
+ if (likely(jc)) {
274
+ for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
275
+ qatomic_set(&jc->array[i].tb, NULL);
276
+ }
277
+ } else {
278
+ /* This should happen once during realize, and thus never race. */
279
+ jc = g_new0(CPUJumpCache, 1);
280
+ jc = qatomic_xchg(&cpu->tb_jmp_cache, jc);
281
+ assert(jc == NULL);
282
+ }
283
+}
284
+
285
/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
286
void tcg_flush_softmmu_tlb(CPUState *cs)
287
{
288
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
289
index XXXXXXX..XXXXXXX 100644
290
--- a/hw/core/cpu-common.c
291
+++ b/hw/core/cpu-common.c
292
@@ -XXX,XX +XXX,XX @@ static void cpu_common_reset(DeviceState *dev)
293
cpu->cflags_next_tb = -1;
294
295
if (tcg_enabled()) {
296
- cpu_tb_jmp_cache_clear(cpu);
297
-
129
-
298
+ tcg_flush_jmp_cache(cpu);
130
- /* Maximum physical arguments are constrained by TCG_TYPE_I128. */
299
tcg_flush_softmmu_tlb(cpu);
131
- TCGCallArgumentLoc in[MAX_CALL_IARGS * (128 / TCG_TARGET_REG_BITS)];
300
}
132
-} TCGHelperInfo;
301
}
133
-
302
diff --git a/plugins/core.c b/plugins/core.c
134
extern TCGContext tcg_init_ctx;
303
index XXXXXXX..XXXXXXX 100644
135
extern TCGContext **tcg_ctxs;
304
--- a/plugins/core.c
136
extern unsigned int tcg_cur_ctxs;
305
+++ b/plugins/core.c
306
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
307
static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
308
{
309
bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX);
310
- cpu_tb_jmp_cache_clear(cpu);
311
+ tcg_flush_jmp_cache(cpu);
312
}
313
314
static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
315
diff --git a/trace/control-target.c b/trace/control-target.c
316
index XXXXXXX..XXXXXXX 100644
317
--- a/trace/control-target.c
318
+++ b/trace/control-target.c
319
@@ -XXX,XX +XXX,XX @@ static void trace_event_synchronize_vcpu_state_dynamic(
320
{
321
bitmap_copy(vcpu->trace_dstate, vcpu->trace_dstate_delayed,
322
CPU_TRACE_DSTATE_MAX_EVENTS);
323
- cpu_tb_jmp_cache_clear(vcpu);
324
+ tcg_flush_jmp_cache(vcpu);
325
}
326
327
void trace_event_set_vcpu_state_dynamic(CPUState *vcpu,
328
--
137
--
329
2.34.1
138
2.34.1
330
139
331
140
diff view generated by jsdifflib
New patch
1
In preparation for compiling tcg/ only once, eliminate
2
the all_helpers array. Instantiate the info structs for
3
the generic helpers in accel/tcg/, and the structs for
4
the target-specific helpers in each translate.c.
1
5
6
Since we don't see all of the info structs at startup,
7
initialize at first use, using g_once_init_* to make
8
sure we don't race while doing so.
9
10
Reviewed-by: Anton Johansson <anjo@rev.ng>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
13
MAINTAINERS | 1 +
14
include/exec/helper-gen.h | 66 ++++++++++++--------
15
include/exec/helper-tcg.h | 75 -----------------------
16
include/qemu/typedefs.h | 1 +
17
include/tcg/helper-info.h | 9 ++-
18
include/tcg/tcg.h | 2 +-
19
accel/tcg/plugin-gen.c | 5 ++
20
accel/tcg/tcg-runtime.c | 4 ++
21
target/alpha/translate.c | 3 +
22
target/arm/tcg/translate.c | 3 +
23
target/avr/translate.c | 5 ++
24
target/cris/translate.c | 6 +-
25
target/hexagon/translate.c | 4 ++
26
target/hppa/translate.c | 5 ++
27
target/i386/tcg/translate.c | 5 ++
28
target/loongarch/translate.c | 4 ++
29
target/m68k/translate.c | 3 +
30
target/microblaze/translate.c | 4 ++
31
target/mips/tcg/translate.c | 5 ++
32
target/nios2/translate.c | 5 ++
33
target/openrisc/translate.c | 5 ++
34
target/ppc/translate.c | 4 ++
35
target/riscv/translate.c | 4 ++
36
target/rx/translate.c | 5 ++
37
target/s390x/tcg/translate.c | 4 ++
38
target/sh4/translate.c | 4 ++
39
target/sparc/translate.c | 3 +
40
target/tricore/translate.c | 5 ++
41
target/xtensa/translate.c | 4 ++
42
tcg/tcg.c | 108 ++++++++++++---------------------
43
include/exec/helper-info.c.inc | 96 +++++++++++++++++++++++++++++
44
31 files changed, 282 insertions(+), 175 deletions(-)
45
delete mode 100644 include/exec/helper-tcg.h
46
create mode 100644 include/exec/helper-info.c.inc
47
48
diff --git a/MAINTAINERS b/MAINTAINERS
49
index XXXXXXX..XXXXXXX 100644
50
--- a/MAINTAINERS
51
+++ b/MAINTAINERS
52
@@ -XXX,XX +XXX,XX @@ F: include/exec/exec-all.h
53
F: include/exec/tb-flush.h
54
F: include/exec/target_long.h
55
F: include/exec/helper*.h
56
+F: include/exec/helper-info.c.inc
57
F: include/sysemu/cpus.h
58
F: include/sysemu/tcg.h
59
F: include/hw/core/tcg-cpu-ops.h
60
diff --git a/include/exec/helper-gen.h b/include/exec/helper-gen.h
61
index XXXXXXX..XXXXXXX 100644
62
--- a/include/exec/helper-gen.h
63
+++ b/include/exec/helper-gen.h
64
@@ -XXX,XX +XXX,XX @@
65
-/* Helper file for declaring TCG helper functions.
66
- This one expands generation functions for tcg opcodes. */
67
+/* SPDX-License-Identifier: GPL-2.0-or-later */
68
+/*
69
+ * Helper file for declaring TCG helper functions.
70
+ * This one expands generation functions for tcg opcodes.
71
+ * Define HELPER_H for the header file to be expanded,
72
+ * and static inline to change from global file scope.
73
+ */
74
75
#ifndef HELPER_GEN_H
76
#define HELPER_GEN_H
77
78
+#include "tcg/tcg.h"
79
+#include "tcg/helper-info.h"
80
#include "exec/helper-head.h"
81
82
#define DEF_HELPER_FLAGS_0(name, flags, ret) \
83
+extern TCGHelperInfo glue(helper_info_, name); \
84
static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
85
{ \
86
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 0, NULL); \
87
+ tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 0, NULL); \
88
}
89
90
#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
91
+extern TCGHelperInfo glue(helper_info_, name); \
92
static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
93
dh_arg_decl(t1, 1)) \
94
{ \
95
- TCGTemp *args[1] = { dh_arg(t1, 1) }; \
96
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 1, args); \
97
+ TCGTemp *args[1] = { dh_arg(t1, 1) }; \
98
+ tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 1, args); \
99
}
100
101
#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
102
+extern TCGHelperInfo glue(helper_info_, name); \
103
static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
104
dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \
105
{ \
106
- TCGTemp *args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) }; \
107
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 2, args); \
108
+ TCGTemp *args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) }; \
109
+ tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 2, args); \
110
}
111
112
#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
113
+extern TCGHelperInfo glue(helper_info_, name); \
114
static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
115
dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
116
{ \
117
- TCGTemp *args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) }; \
118
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 3, args); \
119
+ TCGTemp *args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) }; \
120
+ tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 3, args); \
121
}
122
123
#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
124
+extern TCGHelperInfo glue(helper_info_, name); \
125
static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
126
dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \
127
dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
128
{ \
129
- TCGTemp *args[4] = { dh_arg(t1, 1), dh_arg(t2, 2), \
130
- dh_arg(t3, 3), dh_arg(t4, 4) }; \
131
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 4, args); \
132
+ TCGTemp *args[4] = { dh_arg(t1, 1), dh_arg(t2, 2), \
133
+ dh_arg(t3, 3), dh_arg(t4, 4) }; \
134
+ tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 4, args); \
135
}
136
137
#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
138
+extern TCGHelperInfo glue(helper_info_, name); \
139
static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
140
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
141
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
142
dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
143
{ \
144
- TCGTemp *args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
145
- dh_arg(t4, 4), dh_arg(t5, 5) }; \
146
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 5, args); \
147
+ TCGTemp *args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
148
+ dh_arg(t4, 4), dh_arg(t5, 5) }; \
149
+ tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 5, args); \
150
}
151
152
#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
153
+extern TCGHelperInfo glue(helper_info_, name); \
154
static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
155
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
156
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
157
dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \
158
{ \
159
- TCGTemp *args[6] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
160
- dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6) }; \
161
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 6, args); \
162
+ TCGTemp *args[6] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
163
+ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6) }; \
164
+ tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 6, args); \
165
}
166
167
#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7)\
168
+extern TCGHelperInfo glue(helper_info_, name); \
169
static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
170
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
171
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
172
dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6), \
173
dh_arg_decl(t7, 7)) \
174
{ \
175
- TCGTemp *args[7] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
176
- dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \
177
- dh_arg(t7, 7) }; \
178
- tcg_gen_callN(HELPER(name), dh_retvar(ret), 7, args); \
179
+ TCGTemp *args[7] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
180
+ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \
181
+ dh_arg(t7, 7) }; \
182
+ tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 7, args); \
183
}
184
185
#include "helper.h"
186
@@ -XXX,XX +XXX,XX @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
187
#undef DEF_HELPER_FLAGS_5
188
#undef DEF_HELPER_FLAGS_6
189
#undef DEF_HELPER_FLAGS_7
190
-#undef GEN_HELPER
191
192
#endif /* HELPER_GEN_H */
193
diff --git a/include/exec/helper-tcg.h b/include/exec/helper-tcg.h
194
deleted file mode 100644
195
index XXXXXXX..XXXXXXX
196
--- a/include/exec/helper-tcg.h
197
+++ /dev/null
198
@@ -XXX,XX +XXX,XX @@
199
-/* Helper file for declaring TCG helper functions.
200
- This one defines data structures private to tcg.c. */
201
-
202
-#ifndef HELPER_TCG_H
203
-#define HELPER_TCG_H
204
-
205
-#include "exec/helper-head.h"
206
-
207
-/* Need one more level of indirection before stringification
208
- to get all the macros expanded first. */
209
-#define str(s) #s
210
-
211
-#define DEF_HELPER_FLAGS_0(NAME, FLAGS, ret) \
212
- { .func = HELPER(NAME), .name = str(NAME), \
213
- .flags = FLAGS | dh_callflag(ret), \
214
- .typemask = dh_typemask(ret, 0) },
215
-
216
-#define DEF_HELPER_FLAGS_1(NAME, FLAGS, ret, t1) \
217
- { .func = HELPER(NAME), .name = str(NAME), \
218
- .flags = FLAGS | dh_callflag(ret), \
219
- .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) },
220
-
221
-#define DEF_HELPER_FLAGS_2(NAME, FLAGS, ret, t1, t2) \
222
- { .func = HELPER(NAME), .name = str(NAME), \
223
- .flags = FLAGS | dh_callflag(ret), \
224
- .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
225
- | dh_typemask(t2, 2) },
226
-
227
-#define DEF_HELPER_FLAGS_3(NAME, FLAGS, ret, t1, t2, t3) \
228
- { .func = HELPER(NAME), .name = str(NAME), \
229
- .flags = FLAGS | dh_callflag(ret), \
230
- .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
231
- | dh_typemask(t2, 2) | dh_typemask(t3, 3) },
232
-
233
-#define DEF_HELPER_FLAGS_4(NAME, FLAGS, ret, t1, t2, t3, t4) \
234
- { .func = HELPER(NAME), .name = str(NAME), \
235
- .flags = FLAGS | dh_callflag(ret), \
236
- .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
237
- | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) },
238
-
239
-#define DEF_HELPER_FLAGS_5(NAME, FLAGS, ret, t1, t2, t3, t4, t5) \
240
- { .func = HELPER(NAME), .name = str(NAME), \
241
- .flags = FLAGS | dh_callflag(ret), \
242
- .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
243
- | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) \
244
- | dh_typemask(t5, 5) },
245
-
246
-#define DEF_HELPER_FLAGS_6(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6) \
247
- { .func = HELPER(NAME), .name = str(NAME), \
248
- .flags = FLAGS | dh_callflag(ret), \
249
- .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
250
- | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) \
251
- | dh_typemask(t5, 5) | dh_typemask(t6, 6) },
252
-
253
-#define DEF_HELPER_FLAGS_7(NAME, FLAGS, ret, t1, t2, t3, t4, t5, t6, t7) \
254
- { .func = HELPER(NAME), .name = str(NAME), .flags = FLAGS, \
255
- .typemask = dh_typemask(ret, 0) | dh_typemask(t1, 1) \
256
- | dh_typemask(t2, 2) | dh_typemask(t3, 3) | dh_typemask(t4, 4) \
257
- | dh_typemask(t5, 5) | dh_typemask(t6, 6) | dh_typemask(t7, 7) },
258
-
259
-#include "helper.h"
260
-#include "accel/tcg/tcg-runtime.h"
261
-#include "accel/tcg/plugin-helpers.h"
262
-
263
-#undef str
264
-#undef DEF_HELPER_FLAGS_0
265
-#undef DEF_HELPER_FLAGS_1
266
-#undef DEF_HELPER_FLAGS_2
267
-#undef DEF_HELPER_FLAGS_3
268
-#undef DEF_HELPER_FLAGS_4
269
-#undef DEF_HELPER_FLAGS_5
270
-#undef DEF_HELPER_FLAGS_6
271
-#undef DEF_HELPER_FLAGS_7
272
-
273
-#endif /* HELPER_TCG_H */
274
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
275
index XXXXXXX..XXXXXXX 100644
276
--- a/include/qemu/typedefs.h
277
+++ b/include/qemu/typedefs.h
278
@@ -XXX,XX +XXX,XX @@ typedef struct ReservedRegion ReservedRegion;
279
typedef struct SavedIOTLB SavedIOTLB;
280
typedef struct SHPCDevice SHPCDevice;
281
typedef struct SSIBus SSIBus;
282
+typedef struct TCGHelperInfo TCGHelperInfo;
283
typedef struct TranslationBlock TranslationBlock;
284
typedef struct VirtIODevice VirtIODevice;
285
typedef struct Visitor Visitor;
286
diff --git a/include/tcg/helper-info.h b/include/tcg/helper-info.h
287
index XXXXXXX..XXXXXXX 100644
288
--- a/include/tcg/helper-info.h
289
+++ b/include/tcg/helper-info.h
290
@@ -XXX,XX +XXX,XX @@ typedef struct TCGCallArgumentLoc {
291
unsigned tmp_subindex : 2;
292
} TCGCallArgumentLoc;
293
294
-typedef struct TCGHelperInfo {
295
+struct TCGHelperInfo {
296
void *func;
297
const char *name;
298
+
299
+ /* Used with g_once_init_enter. */
300
#ifdef CONFIG_TCG_INTERPRETER
301
ffi_cif *cif;
302
+#else
303
+ uintptr_t init;
304
#endif
305
+
306
unsigned typemask : 32;
307
unsigned flags : 8;
308
unsigned nr_in : 8;
309
@@ -XXX,XX +XXX,XX @@ typedef struct TCGHelperInfo {
310
311
/* Maximum physical arguments are constrained by TCG_TYPE_I128. */
312
TCGCallArgumentLoc in[MAX_CALL_IARGS * (128 / TCG_TARGET_REG_BITS)];
313
-} TCGHelperInfo;
314
+};
315
316
#endif /* TCG_HELPER_INFO_H */
317
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
318
index XXXXXXX..XXXXXXX 100644
319
--- a/include/tcg/tcg.h
320
+++ b/include/tcg/tcg.h
321
@@ -XXX,XX +XXX,XX @@ typedef struct TCGTargetOpDef {
322
323
bool tcg_op_supported(TCGOpcode op);
324
325
-void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args);
326
+void tcg_gen_callN(TCGHelperInfo *, TCGTemp *ret, int nargs, TCGTemp **args);
327
328
TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs);
329
void tcg_op_remove(TCGContext *s, TCGOp *op);
330
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
331
index XXXXXXX..XXXXXXX 100644
332
--- a/accel/tcg/plugin-gen.c
333
+++ b/accel/tcg/plugin-gen.c
334
@@ -XXX,XX +XXX,XX @@
335
#include "exec/exec-all.h"
336
#include "exec/plugin-gen.h"
337
#include "exec/translator.h"
338
+#include "exec/helper-proto.h"
339
+
340
+#define HELPER_H "accel/tcg/plugin-helpers.h"
341
+#include "exec/helper-info.c.inc"
342
+#undef HELPER_H
343
344
#ifdef CONFIG_SOFTMMU
345
# define CONFIG_SOFTMMU_GATE 1
346
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
347
index XXXXXXX..XXXXXXX 100644
348
--- a/accel/tcg/tcg-runtime.c
349
+++ b/accel/tcg/tcg-runtime.c
350
@@ -XXX,XX +XXX,XX @@
351
#include "exec/log.h"
352
#include "tcg/tcg.h"
353
354
+#define HELPER_H "accel/tcg/tcg-runtime.h"
355
+#include "exec/helper-info.c.inc"
356
+#undef HELPER_H
357
+
358
/* 32-bit helpers */
359
360
int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2)
361
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
362
index XXXXXXX..XXXXXXX 100644
363
--- a/target/alpha/translate.c
364
+++ b/target/alpha/translate.c
365
@@ -XXX,XX +XXX,XX @@
366
#include "exec/translator.h"
367
#include "exec/log.h"
368
369
+#define HELPER_H "helper.h"
370
+#include "exec/helper-info.c.inc"
371
+#undef HELPER_H
372
373
#undef ALPHA_DEBUG_DISAS
374
#define CONFIG_SOFTFLOAT_INLINE
375
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
376
index XXXXXXX..XXXXXXX 100644
377
--- a/target/arm/tcg/translate.c
378
+++ b/target/arm/tcg/translate.c
379
@@ -XXX,XX +XXX,XX @@
380
#include "exec/log.h"
381
#include "cpregs.h"
382
383
+#define HELPER_H "helper.h"
384
+#include "exec/helper-info.c.inc"
385
+#undef HELPER_H
386
387
#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
388
#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
389
diff --git a/target/avr/translate.c b/target/avr/translate.c
390
index XXXXXXX..XXXXXXX 100644
391
--- a/target/avr/translate.c
392
+++ b/target/avr/translate.c
393
@@ -XXX,XX +XXX,XX @@
394
#include "exec/translator.h"
395
#include "exec/gen-icount.h"
396
397
+#define HELPER_H "helper.h"
398
+#include "exec/helper-info.c.inc"
399
+#undef HELPER_H
400
+
401
+
402
/*
403
* Define if you want a BREAK instruction translated to a breakpoint
404
* Active debugging connection is assumed
405
diff --git a/target/cris/translate.c b/target/cris/translate.c
406
index XXXXXXX..XXXXXXX 100644
407
--- a/target/cris/translate.c
408
+++ b/target/cris/translate.c
409
@@ -XXX,XX +XXX,XX @@
410
#include "exec/translator.h"
411
#include "crisv32-decode.h"
412
#include "qemu/qemu-print.h"
413
-
414
#include "exec/helper-gen.h"
415
-
416
#include "exec/log.h"
417
418
+#define HELPER_H "helper.h"
419
+#include "exec/helper-info.c.inc"
420
+#undef HELPER_H
421
+
422
423
#define DISAS_CRIS 0
424
#if DISAS_CRIS
425
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
426
index XXXXXXX..XXXXXXX 100644
427
--- a/target/hexagon/translate.c
428
+++ b/target/hexagon/translate.c
429
@@ -XXX,XX +XXX,XX @@
430
#include "genptr.h"
431
#include "printinsn.h"
432
433
+#define HELPER_H "helper.h"
434
+#include "exec/helper-info.c.inc"
435
+#undef HELPER_H
436
+
437
#include "analyze_funcs_generated.c.inc"
438
439
typedef void (*AnalyzeInsn)(DisasContext *ctx);
440
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
441
index XXXXXXX..XXXXXXX 100644
442
--- a/target/hppa/translate.c
443
+++ b/target/hppa/translate.c
444
@@ -XXX,XX +XXX,XX @@
445
#include "exec/translator.h"
446
#include "exec/log.h"
447
448
+#define HELPER_H "helper.h"
449
+#include "exec/helper-info.c.inc"
450
+#undef HELPER_H
451
+
452
+
453
/* Since we have a distinction between register size and address size,
454
we need to redefine all of these. */
455
456
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
457
index XXXXXXX..XXXXXXX 100644
458
--- a/target/i386/tcg/translate.c
459
+++ b/target/i386/tcg/translate.c
460
@@ -XXX,XX +XXX,XX @@
461
462
#include "exec/log.h"
463
464
+#define HELPER_H "helper.h"
465
+#include "exec/helper-info.c.inc"
466
+#undef HELPER_H
467
+
468
+
469
#define PREFIX_REPZ 0x01
470
#define PREFIX_REPNZ 0x02
471
#define PREFIX_LOCK 0x04
472
diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c
473
index XXXXXXX..XXXXXXX 100644
474
--- a/target/loongarch/translate.c
475
+++ b/target/loongarch/translate.c
476
@@ -XXX,XX +XXX,XX @@ static TCGv cpu_lladdr, cpu_llval;
477
478
#include "exec/gen-icount.h"
479
480
+#define HELPER_H "helper.h"
481
+#include "exec/helper-info.c.inc"
482
+#undef HELPER_H
483
+
484
#define DISAS_STOP DISAS_TARGET_0
485
#define DISAS_EXIT DISAS_TARGET_1
486
#define DISAS_EXIT_UPDATE DISAS_TARGET_2
487
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
488
index XXXXXXX..XXXXXXX 100644
489
--- a/target/m68k/translate.c
490
+++ b/target/m68k/translate.c
491
@@ -XXX,XX +XXX,XX @@
492
#include "exec/log.h"
493
#include "fpu/softfloat.h"
494
495
+#define HELPER_H "helper.h"
496
+#include "exec/helper-info.c.inc"
497
+#undef HELPER_H
498
499
//#define DEBUG_DISPATCH 1
500
501
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
502
index XXXXXXX..XXXXXXX 100644
503
--- a/target/microblaze/translate.c
504
+++ b/target/microblaze/translate.c
505
@@ -XXX,XX +XXX,XX @@
506
507
#include "exec/log.h"
508
509
+#define HELPER_H "helper.h"
510
+#include "exec/helper-info.c.inc"
511
+#undef HELPER_H
512
+
513
#define EXTRACT_FIELD(src, start, end) \
514
(((src) >> start) & ((1 << (end - start + 1)) - 1))
515
516
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
517
index XXXXXXX..XXXXXXX 100644
518
--- a/target/mips/tcg/translate.c
519
+++ b/target/mips/tcg/translate.c
520
@@ -XXX,XX +XXX,XX @@
521
#include "fpu_helper.h"
522
#include "translate.h"
523
524
+#define HELPER_H "helper.h"
525
+#include "exec/helper-info.c.inc"
526
+#undef HELPER_H
527
+
528
+
529
/*
530
* Many sysemu-only helpers are not reachable for user-only.
531
* Define stub generators here, so that we need not either sprinkle
532
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
533
index XXXXXXX..XXXXXXX 100644
534
--- a/target/nios2/translate.c
535
+++ b/target/nios2/translate.c
536
@@ -XXX,XX +XXX,XX @@
537
#include "exec/gen-icount.h"
538
#include "semihosting/semihost.h"
539
540
+#define HELPER_H "helper.h"
541
+#include "exec/helper-info.c.inc"
542
+#undef HELPER_H
543
+
544
+
545
/* is_jmp field values */
546
#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
547
548
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
549
index XXXXXXX..XXXXXXX 100644
550
--- a/target/openrisc/translate.c
551
+++ b/target/openrisc/translate.c
552
@@ -XXX,XX +XXX,XX @@
553
554
#include "exec/log.h"
555
556
+#define HELPER_H "helper.h"
557
+#include "exec/helper-info.c.inc"
558
+#undef HELPER_H
559
+
560
+
561
/* is_jmp field values */
562
#define DISAS_EXIT DISAS_TARGET_0 /* force exit to main loop */
563
#define DISAS_JUMP DISAS_TARGET_1 /* exit via jmp_pc/jmp_pc_imm */
564
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
565
index XXXXXXX..XXXXXXX 100644
566
--- a/target/ppc/translate.c
567
+++ b/target/ppc/translate.c
568
@@ -XXX,XX +XXX,XX @@
569
#include "qemu/qemu-print.h"
570
#include "qapi/error.h"
571
572
+#define HELPER_H "helper.h"
573
+#include "exec/helper-info.c.inc"
574
+#undef HELPER_H
575
+
576
#define CPU_SINGLE_STEP 0x1
577
#define CPU_BRANCH_STEP 0x2
578
579
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
580
index XXXXXXX..XXXXXXX 100644
581
--- a/target/riscv/translate.c
582
+++ b/target/riscv/translate.c
583
@@ -XXX,XX +XXX,XX @@
584
#include "instmap.h"
585
#include "internals.h"
586
587
+#define HELPER_H "helper.h"
588
+#include "exec/helper-info.c.inc"
589
+#undef HELPER_H
590
+
591
/* global register indices */
592
static TCGv cpu_gpr[32], cpu_gprh[32], cpu_pc, cpu_vl, cpu_vstart;
593
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
594
diff --git a/target/rx/translate.c b/target/rx/translate.c
595
index XXXXXXX..XXXXXXX 100644
596
--- a/target/rx/translate.c
597
+++ b/target/rx/translate.c
598
@@ -XXX,XX +XXX,XX @@
599
#include "exec/translator.h"
600
#include "exec/log.h"
601
602
+#define HELPER_H "helper.h"
603
+#include "exec/helper-info.c.inc"
604
+#undef HELPER_H
605
+
606
+
607
typedef struct DisasContext {
608
DisasContextBase base;
609
CPURXState *env;
610
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
611
index XXXXXXX..XXXXXXX 100644
612
--- a/target/s390x/tcg/translate.c
613
+++ b/target/s390x/tcg/translate.c
614
@@ -XXX,XX +XXX,XX @@
615
#include "exec/log.h"
616
#include "qemu/atomic128.h"
617
618
+#define HELPER_H "helper.h"
619
+#include "exec/helper-info.c.inc"
620
+#undef HELPER_H
621
+
622
623
/* Information that (most) every instruction needs to manipulate. */
624
typedef struct DisasContext DisasContext;
625
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
626
index XXXXXXX..XXXXXXX 100644
627
--- a/target/sh4/translate.c
628
+++ b/target/sh4/translate.c
629
@@ -XXX,XX +XXX,XX @@
630
#include "exec/log.h"
631
#include "qemu/qemu-print.h"
632
633
+#define HELPER_H "helper.h"
634
+#include "exec/helper-info.c.inc"
635
+#undef HELPER_H
636
+
637
638
typedef struct DisasContext {
639
DisasContextBase base;
640
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
641
index XXXXXXX..XXXXXXX 100644
642
--- a/target/sparc/translate.c
643
+++ b/target/sparc/translate.c
644
@@ -XXX,XX +XXX,XX @@
645
#include "exec/log.h"
646
#include "asi.h"
647
648
+#define HELPER_H "helper.h"
649
+#include "exec/helper-info.c.inc"
650
+#undef HELPER_H
651
652
#define DYNAMIC_PC 1 /* dynamic pc value */
653
#define JUMP_PC 2 /* dynamic pc value which takes only two values
654
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
655
index XXXXXXX..XXXXXXX 100644
656
--- a/target/tricore/translate.c
657
+++ b/target/tricore/translate.c
658
@@ -XXX,XX +XXX,XX @@
659
#include "exec/translator.h"
660
#include "exec/log.h"
661
662
+#define HELPER_H "helper.h"
663
+#include "exec/helper-info.c.inc"
664
+#undef HELPER_H
665
+
666
+
667
/*
668
* TCG registers
669
*/
670
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
671
index XXXXXXX..XXXXXXX 100644
672
--- a/target/xtensa/translate.c
673
+++ b/target/xtensa/translate.c
674
@@ -XXX,XX +XXX,XX @@
675
676
#include "exec/log.h"
677
678
+#define HELPER_H "helper.h"
679
+#include "exec/helper-info.c.inc"
680
+#undef HELPER_H
681
+
682
683
struct DisasContext {
684
DisasContextBase base;
685
diff --git a/tcg/tcg.c b/tcg/tcg.c
686
index XXXXXXX..XXXXXXX 100644
687
--- a/tcg/tcg.c
688
+++ b/tcg/tcg.c
689
@@ -XXX,XX +XXX,XX @@ void tcg_pool_reset(TCGContext *s)
690
s->pool_current = NULL;
691
}
692
693
-#include "exec/helper-proto.h"
694
-
695
-static TCGHelperInfo all_helpers[] = {
696
-#include "exec/helper-tcg.h"
697
-};
698
-static GHashTable *helper_table;
699
-
700
/*
701
* Create TCGHelperInfo structures for "tcg/tcg-ldst.h" functions,
702
* akin to what "exec/helper-tcg.h" does with DEF_HELPER_FLAGS_N.
703
@@ -XXX,XX +XXX,XX @@ static ffi_type *typecode_to_ffi(int argmask)
704
g_assert_not_reached();
705
}
706
707
-static void init_ffi_layouts(void)
708
+static ffi_cif *init_ffi_layout(TCGHelperInfo *info)
709
{
710
- /* g_direct_hash/equal for direct comparisons on uint32_t. */
711
- GHashTable *ffi_table = g_hash_table_new(NULL, NULL);
712
+ unsigned typemask = info->typemask;
713
+ struct {
714
+ ffi_cif cif;
715
+ ffi_type *args[];
716
+ } *ca;
717
+ ffi_status status;
718
+ int nargs;
719
720
- for (int i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
721
- TCGHelperInfo *info = &all_helpers[i];
722
- unsigned typemask = info->typemask;
723
- gpointer hash = (gpointer)(uintptr_t)typemask;
724
- struct {
725
- ffi_cif cif;
726
- ffi_type *args[];
727
- } *ca;
728
- ffi_status status;
729
- int nargs;
730
- ffi_cif *cif;
731
+ /* Ignoring the return type, find the last non-zero field. */
732
+ nargs = 32 - clz32(typemask >> 3);
733
+ nargs = DIV_ROUND_UP(nargs, 3);
734
+ assert(nargs <= MAX_CALL_IARGS);
735
736
- cif = g_hash_table_lookup(ffi_table, hash);
737
- if (cif) {
738
- info->cif = cif;
739
- continue;
740
+ ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
741
+ ca->cif.rtype = typecode_to_ffi(typemask & 7);
742
+ ca->cif.nargs = nargs;
743
+
744
+ if (nargs != 0) {
745
+ ca->cif.arg_types = ca->args;
746
+ for (int j = 0; j < nargs; ++j) {
747
+ int typecode = extract32(typemask, (j + 1) * 3, 3);
748
+ ca->args[j] = typecode_to_ffi(typecode);
749
}
750
-
751
- /* Ignoring the return type, find the last non-zero field. */
752
- nargs = 32 - clz32(typemask >> 3);
753
- nargs = DIV_ROUND_UP(nargs, 3);
754
- assert(nargs <= MAX_CALL_IARGS);
755
-
756
- ca = g_malloc0(sizeof(*ca) + nargs * sizeof(ffi_type *));
757
- ca->cif.rtype = typecode_to_ffi(typemask & 7);
758
- ca->cif.nargs = nargs;
759
-
760
- if (nargs != 0) {
761
- ca->cif.arg_types = ca->args;
762
- for (int j = 0; j < nargs; ++j) {
763
- int typecode = extract32(typemask, (j + 1) * 3, 3);
764
- ca->args[j] = typecode_to_ffi(typecode);
765
- }
766
- }
767
-
768
- status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
769
- ca->cif.rtype, ca->cif.arg_types);
770
- assert(status == FFI_OK);
771
-
772
- cif = &ca->cif;
773
- info->cif = cif;
774
- g_hash_table_insert(ffi_table, hash, (gpointer)cif);
775
}
776
777
- g_hash_table_destroy(ffi_table);
778
+ status = ffi_prep_cif(&ca->cif, FFI_DEFAULT_ABI, nargs,
779
+ ca->cif.rtype, ca->cif.arg_types);
780
+ assert(status == FFI_OK);
781
+
782
+ return &ca->cif;
783
}
784
+
785
+#define HELPER_INFO_INIT(I) (&(I)->cif)
786
+#define HELPER_INFO_INIT_VAL(I) init_ffi_layout(I)
787
+#else
788
+#define HELPER_INFO_INIT(I) (&(I)->init)
789
+#define HELPER_INFO_INIT_VAL(I) 1
790
#endif /* CONFIG_TCG_INTERPRETER */
791
792
static inline bool arg_slot_reg_p(unsigned arg_slot)
793
@@ -XXX,XX +XXX,XX @@ static void tcg_context_init(unsigned max_cpus)
794
args_ct += n;
795
}
796
797
- /* Register helpers. */
798
- /* Use g_direct_hash/equal for direct pointer comparisons on func. */
799
- helper_table = g_hash_table_new(NULL, NULL);
800
-
801
- for (i = 0; i < ARRAY_SIZE(all_helpers); ++i) {
802
- init_call_layout(&all_helpers[i]);
803
- g_hash_table_insert(helper_table, (gpointer)all_helpers[i].func,
804
- (gpointer)&all_helpers[i]);
805
- }
806
-
807
init_call_layout(&info_helper_ld32_mmu);
808
init_call_layout(&info_helper_ld64_mmu);
809
init_call_layout(&info_helper_ld128_mmu);
810
@@ -XXX,XX +XXX,XX @@ static void tcg_context_init(unsigned max_cpus)
811
init_call_layout(&info_helper_st64_mmu);
812
init_call_layout(&info_helper_st128_mmu);
813
814
-#ifdef CONFIG_TCG_INTERPRETER
815
- init_ffi_layouts();
816
-#endif
817
-
818
tcg_target_init(s);
819
process_op_defs(s);
820
821
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op)
822
823
static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);
824
825
-void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
826
+void tcg_gen_callN(TCGHelperInfo *info, TCGTemp *ret, int nargs, TCGTemp **args)
827
{
828
- const TCGHelperInfo *info;
829
TCGv_i64 extend_free[MAX_CALL_IARGS];
830
int n_extend = 0;
831
TCGOp *op;
832
int i, n, pi = 0, total_args;
833
834
- info = g_hash_table_lookup(helper_table, (gpointer)func);
835
+ if (unlikely(g_once_init_enter(HELPER_INFO_INIT(info)))) {
836
+ init_call_layout(info);
837
+ g_once_init_leave(HELPER_INFO_INIT(info), HELPER_INFO_INIT_VAL(info));
838
+ }
839
+
840
total_args = info->nr_out + info->nr_in + 2;
841
op = tcg_op_alloc(INDEX_op_call, total_args);
842
843
@@ -XXX,XX +XXX,XX @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
844
g_assert_not_reached();
845
}
846
}
847
- op->args[pi++] = (uintptr_t)func;
848
+ op->args[pi++] = (uintptr_t)info->func;
849
op->args[pi++] = (uintptr_t)info;
850
tcg_debug_assert(pi == total_args);
851
852
diff --git a/include/exec/helper-info.c.inc b/include/exec/helper-info.c.inc
853
new file mode 100644
854
index XXXXXXX..XXXXXXX
855
--- /dev/null
856
+++ b/include/exec/helper-info.c.inc
857
@@ -XXX,XX +XXX,XX @@
858
+/* SPDX-License-Identifier: GPL-2.0-or-later */
859
+/*
860
+ * Helper file for declaring TCG helper functions.
861
+ * This one expands info structures for tcg helpers.
862
+ * Define HELPER_H for the header file to be expanded.
863
+ */
864
+
865
+#include "tcg/tcg.h"
866
+#include "tcg/helper-info.h"
867
+#include "exec/helper-head.h"
868
+
869
+/*
870
+ * Need one more level of indirection before stringification
871
+ * to get all the macros expanded first.
872
+ */
873
+#define str(s) #s
874
+
875
+#define DEF_HELPER_FLAGS_0(NAME, FLAGS, RET) \
876
+ TCGHelperInfo glue(helper_info_, NAME) = { \
877
+ .func = HELPER(NAME), .name = str(NAME), \
878
+ .flags = FLAGS | dh_callflag(RET), \
879
+ .typemask = dh_typemask(RET, 0) \
880
+ };
881
+
882
+#define DEF_HELPER_FLAGS_1(NAME, FLAGS, RET, T1) \
883
+ TCGHelperInfo glue(helper_info_, NAME) = { \
884
+ .func = HELPER(NAME), .name = str(NAME), \
885
+ .flags = FLAGS | dh_callflag(RET), \
886
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
887
+ };
888
+
889
+#define DEF_HELPER_FLAGS_2(NAME, FLAGS, RET, T1, T2) \
890
+ TCGHelperInfo glue(helper_info_, NAME) = { \
891
+ .func = HELPER(NAME), .name = str(NAME), \
892
+ .flags = FLAGS | dh_callflag(RET), \
893
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
894
+ | dh_typemask(T2, 2) \
895
+ };
896
+
897
+#define DEF_HELPER_FLAGS_3(NAME, FLAGS, RET, T1, T2, T3) \
898
+ TCGHelperInfo glue(helper_info_, NAME) = { \
899
+ .func = HELPER(NAME), .name = str(NAME), \
900
+ .flags = FLAGS | dh_callflag(RET), \
901
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
902
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
903
+ };
904
+
905
+#define DEF_HELPER_FLAGS_4(NAME, FLAGS, RET, T1, T2, T3, T4) \
906
+ TCGHelperInfo glue(helper_info_, NAME) = { \
907
+ .func = HELPER(NAME), .name = str(NAME), \
908
+ .flags = FLAGS | dh_callflag(RET), \
909
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
910
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
911
+ | dh_typemask(T4, 4) \
912
+ };
913
+
914
+#define DEF_HELPER_FLAGS_5(NAME, FLAGS, RET, T1, T2, T3, T4, T5) \
915
+ TCGHelperInfo glue(helper_info_, NAME) = { \
916
+ .func = HELPER(NAME), .name = str(NAME), \
917
+ .flags = FLAGS | dh_callflag(RET), \
918
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
919
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
920
+ | dh_typemask(T4, 4) | dh_typemask(T5, 5) \
921
+ };
922
+
923
+#define DEF_HELPER_FLAGS_6(NAME, FLAGS, RET, T1, T2, T3, T4, T5, T6) \
924
+ TCGHelperInfo glue(helper_info_, NAME) = { \
925
+ .func = HELPER(NAME), .name = str(NAME), \
926
+ .flags = FLAGS | dh_callflag(RET), \
927
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
928
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
929
+ | dh_typemask(T4, 4) | dh_typemask(T5, 5) \
930
+ | dh_typemask(T6, 6) \
931
+ };
932
+
933
+#define DEF_HELPER_FLAGS_7(NAME, FLAGS, RET, T1, T2, T3, T4, T5, T6, T7) \
934
+ TCGHelperInfo glue(helper_info_, NAME) = { \
935
+ .func = HELPER(NAME), .name = str(NAME), \
936
+ .flags = FLAGS | dh_callflag(RET), \
937
+ .typemask = dh_typemask(RET, 0) | dh_typemask(T1, 1) \
938
+ | dh_typemask(T2, 2) | dh_typemask(T3, 3) \
939
+ | dh_typemask(T4, 4) | dh_typemask(T5, 5) \
940
+ | dh_typemask(T6, 6) | dh_typemask(T7, 7) \
941
+ };
942
+
943
+#include HELPER_H
944
+
945
+#undef str
946
+#undef DEF_HELPER_FLAGS_0
947
+#undef DEF_HELPER_FLAGS_1
948
+#undef DEF_HELPER_FLAGS_2
949
+#undef DEF_HELPER_FLAGS_3
950
+#undef DEF_HELPER_FLAGS_4
951
+#undef DEF_HELPER_FLAGS_5
952
+#undef DEF_HELPER_FLAGS_6
953
+#undef DEF_HELPER_FLAGS_7
954
--
955
2.34.1
diff view generated by jsdifflib
New patch
1
Removes a multiplicity of calls to __assert_fail, saving up
2
to 360kiB of .text space as measured on an x86_64 host.
1
3
4
Old New Less %Change
5
9257272    8888680    368592    3.98%    qemu-system-aarch64
6
6100968    5911832    189136    3.10%    qemu-system-riscv64
7
5839112    5707032    132080    2.26%    qemu-system-mips
8
4447608    4341752    105856    2.38%    qemu-system-s390x
9
10
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
13
include/tcg/tcg.h | 30 ++++++++++++++++--------------
14
tcg/tcg.c | 19 +++++++++++++++++++
15
2 files changed, 35 insertions(+), 14 deletions(-)
16
17
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/include/tcg/tcg.h
20
+++ b/include/tcg/tcg.h
21
@@ -XXX,XX +XXX,XX @@ static inline void *tcg_splitwx_to_rw(const void *rx)
22
}
23
#endif
24
25
-static inline size_t temp_idx(TCGTemp *ts)
26
-{
27
- ptrdiff_t n = ts - tcg_ctx->temps;
28
- tcg_debug_assert(n >= 0 && n < tcg_ctx->nb_temps);
29
- return n;
30
-}
31
-
32
static inline TCGArg temp_arg(TCGTemp *ts)
33
{
34
return (uintptr_t)ts;
35
@@ -XXX,XX +XXX,XX @@ static inline TCGTemp *arg_temp(TCGArg a)
36
return (TCGTemp *)(uintptr_t)a;
37
}
38
39
-/* Using the offset of a temporary, relative to TCGContext, rather than
40
- its index means that we don't use 0. That leaves offset 0 free for
41
- a NULL representation without having to leave index 0 unused. */
42
+#ifdef CONFIG_DEBUG_TCG
43
+size_t temp_idx(TCGTemp *ts);
44
+TCGTemp *tcgv_i32_temp(TCGv_i32 v);
45
+#else
46
+static inline size_t temp_idx(TCGTemp *ts)
47
+{
48
+ return ts - tcg_ctx->temps;
49
+}
50
+
51
+/*
52
+ * Using the offset of a temporary, relative to TCGContext, rather than
53
+ * its index means that we don't use 0. That leaves offset 0 free for
54
+ * a NULL representation without having to leave index 0 unused.
55
+ */
56
static inline TCGTemp *tcgv_i32_temp(TCGv_i32 v)
57
{
58
- uintptr_t o = (uintptr_t)v;
59
- TCGTemp *t = (void *)tcg_ctx + o;
60
- tcg_debug_assert(offsetof(TCGContext, temps[temp_idx(t)]) == o);
61
- return t;
62
+ return (void *)tcg_ctx + (uintptr_t)v;
63
}
64
+#endif
65
66
static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
67
{
68
diff --git a/tcg/tcg.c b/tcg/tcg.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/tcg/tcg.c
71
+++ b/tcg/tcg.c
72
@@ -XXX,XX +XXX,XX @@ TCGv_vec tcg_constant_vec_matching(TCGv_vec match, unsigned vece, int64_t val)
73
return tcg_constant_vec(t->base_type, vece, val);
74
}
75
76
+#ifdef CONFIG_DEBUG_TCG
77
+size_t temp_idx(TCGTemp *ts)
78
+{
79
+ ptrdiff_t n = ts - tcg_ctx->temps;
80
+ assert(n >= 0 && n < tcg_ctx->nb_temps);
81
+ return n;
82
+}
83
+
84
+TCGTemp *tcgv_i32_temp(TCGv_i32 v)
85
+{
86
+ uintptr_t o = (uintptr_t)v - offsetof(TCGContext, temps);
87
+
88
+ assert(o < sizeof(TCGTemp) * tcg_ctx->nb_temps);
89
+ assert(o % sizeof(TCGTemp) == 0);
90
+
91
+ return (void *)tcg_ctx + (uintptr_t)v;
92
+}
93
+#endif /* CONFIG_DEBUG_TCG */
94
+
95
/* Return true if OP may appear in the opcode stream.
96
Test the runtime variable that controls each opcode. */
97
bool tcg_op_supported(TCGOpcode op)
98
--
99
2.34.1
100
101
diff view generated by jsdifflib
New patch
1
1
Make tcg_gen_callN a static function. Create tcg_gen_call[0-7]
2
functions for use by helper-gen.h.inc.
3
4
Removes a multiplicity of calls to __stack_chk_fail, saving up
5
to 143kiB of .text space as measured on an x86_64 host.
6
7
Old New Less %Change
8
8888680    8741816    146864    1.65%    qemu-system-aarch64
9
5911832    5856152    55680    0.94%    qemu-system-riscv64
10
5816728    5767512    49216    0.85%    qemu-system-mips64
11
6707832    6659144    48688    0.73%    qemu-system-ppc64
12
13
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
---
16
include/exec/helper-gen.h | 40 ++++++++++++++---------------
17
include/tcg/tcg.h | 14 +++++++++-
18
tcg/tcg.c | 54 ++++++++++++++++++++++++++++++++++++++-
19
3 files changed, 86 insertions(+), 22 deletions(-)
20
21
diff --git a/include/exec/helper-gen.h b/include/exec/helper-gen.h
22
index XXXXXXX..XXXXXXX 100644
23
--- a/include/exec/helper-gen.h
24
+++ b/include/exec/helper-gen.h
25
@@ -XXX,XX +XXX,XX @@
26
extern TCGHelperInfo glue(helper_info_, name); \
27
static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
28
{ \
29
- tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 0, NULL); \
30
+ tcg_gen_call0(&glue(helper_info_, name), dh_retvar(ret)); \
31
}
32
33
#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
34
@@ -XXX,XX +XXX,XX @@ extern TCGHelperInfo glue(helper_info_, name); \
35
static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
36
dh_arg_decl(t1, 1)) \
37
{ \
38
- TCGTemp *args[1] = { dh_arg(t1, 1) }; \
39
- tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 1, args); \
40
+ tcg_gen_call1(&glue(helper_info_, name), dh_retvar(ret), \
41
+ dh_arg(t1, 1)); \
42
}
43
44
#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
45
@@ -XXX,XX +XXX,XX @@ extern TCGHelperInfo glue(helper_info_, name); \
46
static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
47
dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \
48
{ \
49
- TCGTemp *args[2] = { dh_arg(t1, 1), dh_arg(t2, 2) }; \
50
- tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 2, args); \
51
+ tcg_gen_call2(&glue(helper_info_, name), dh_retvar(ret), \
52
+ dh_arg(t1, 1), dh_arg(t2, 2)); \
53
}
54
55
#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
56
@@ -XXX,XX +XXX,XX @@ extern TCGHelperInfo glue(helper_info_, name); \
57
static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
58
dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
59
{ \
60
- TCGTemp *args[3] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3) }; \
61
- tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 3, args); \
62
+ tcg_gen_call3(&glue(helper_info_, name), dh_retvar(ret), \
63
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3)); \
64
}
65
66
#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
67
@@ -XXX,XX +XXX,XX @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
68
dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \
69
dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
70
{ \
71
- TCGTemp *args[4] = { dh_arg(t1, 1), dh_arg(t2, 2), \
72
- dh_arg(t3, 3), dh_arg(t4, 4) }; \
73
- tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 4, args); \
74
+ tcg_gen_call4(&glue(helper_info_, name), dh_retvar(ret), \
75
+ dh_arg(t1, 1), dh_arg(t2, 2), \
76
+ dh_arg(t3, 3), dh_arg(t4, 4)); \
77
}
78
79
#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
80
@@ -XXX,XX +XXX,XX @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
81
dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
82
dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
83
{ \
84
- TCGTemp *args[5] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
85
- dh_arg(t4, 4), dh_arg(t5, 5) }; \
86
- tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 5, args); \
87
+ tcg_gen_call5(&glue(helper_info_, name), dh_retvar(ret), \
88
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
89
+ dh_arg(t4, 4), dh_arg(t5, 5)); \
90
}
91
92
#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
93
@@ -XXX,XX +XXX,XX @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
94
dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
95
dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \
96
{ \
97
- TCGTemp *args[6] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
98
- dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6) }; \
99
- tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 6, args); \
100
+ tcg_gen_call6(&glue(helper_info_, name), dh_retvar(ret), \
101
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
102
+ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6)); \
103
}
104
105
#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7)\
106
@@ -XXX,XX +XXX,XX @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
107
dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6), \
108
dh_arg_decl(t7, 7)) \
109
{ \
110
- TCGTemp *args[7] = { dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
111
- dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \
112
- dh_arg(t7, 7) }; \
113
- tcg_gen_callN(&glue(helper_info_, name), dh_retvar(ret), 7, args); \
114
+ tcg_gen_call7(&glue(helper_info_, name), dh_retvar(ret), \
115
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
116
+ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \
117
+ dh_arg(t7, 7)); \
118
}
119
120
#include "helper.h"
121
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
122
index XXXXXXX..XXXXXXX 100644
123
--- a/include/tcg/tcg.h
124
+++ b/include/tcg/tcg.h
125
@@ -XXX,XX +XXX,XX @@ typedef struct TCGTargetOpDef {
126
127
bool tcg_op_supported(TCGOpcode op);
128
129
-void tcg_gen_callN(TCGHelperInfo *, TCGTemp *ret, int nargs, TCGTemp **args);
130
+void tcg_gen_call0(TCGHelperInfo *, TCGTemp *ret);
131
+void tcg_gen_call1(TCGHelperInfo *, TCGTemp *ret, TCGTemp *);
132
+void tcg_gen_call2(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *);
133
+void tcg_gen_call3(TCGHelperInfo *, TCGTemp *ret, TCGTemp *,
134
+ TCGTemp *, TCGTemp *);
135
+void tcg_gen_call4(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
136
+ TCGTemp *, TCGTemp *);
137
+void tcg_gen_call5(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
138
+ TCGTemp *, TCGTemp *, TCGTemp *);
139
+void tcg_gen_call6(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
140
+ TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *);
141
+void tcg_gen_call7(TCGHelperInfo *, TCGTemp *ret, TCGTemp *, TCGTemp *,
142
+ TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *, TCGTemp *);
143
144
TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs);
145
void tcg_op_remove(TCGContext *s, TCGOp *op);
146
diff --git a/tcg/tcg.c b/tcg/tcg.c
147
index XXXXXXX..XXXXXXX 100644
148
--- a/tcg/tcg.c
149
+++ b/tcg/tcg.c
150
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op)
151
152
static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);
153
154
-void tcg_gen_callN(TCGHelperInfo *info, TCGTemp *ret, int nargs, TCGTemp **args)
155
+static void tcg_gen_callN(TCGHelperInfo *info, TCGTemp *ret, TCGTemp **args)
156
{
157
TCGv_i64 extend_free[MAX_CALL_IARGS];
158
int n_extend = 0;
159
@@ -XXX,XX +XXX,XX @@ void tcg_gen_callN(TCGHelperInfo *info, TCGTemp *ret, int nargs, TCGTemp **args)
160
}
161
}
162
163
+void tcg_gen_call0(TCGHelperInfo *info, TCGTemp *ret)
164
+{
165
+ tcg_gen_callN(info, ret, NULL);
166
+}
167
+
168
+void tcg_gen_call1(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1)
169
+{
170
+ tcg_gen_callN(info, ret, &t1);
171
+}
172
+
173
+void tcg_gen_call2(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2)
174
+{
175
+ TCGTemp *args[2] = { t1, t2 };
176
+ tcg_gen_callN(info, ret, args);
177
+}
178
+
179
+void tcg_gen_call3(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
180
+ TCGTemp *t2, TCGTemp *t3)
181
+{
182
+ TCGTemp *args[3] = { t1, t2, t3 };
183
+ tcg_gen_callN(info, ret, args);
184
+}
185
+
186
+void tcg_gen_call4(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
187
+ TCGTemp *t2, TCGTemp *t3, TCGTemp *t4)
188
+{
189
+ TCGTemp *args[4] = { t1, t2, t3, t4 };
190
+ tcg_gen_callN(info, ret, args);
191
+}
192
+
193
+void tcg_gen_call5(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
194
+ TCGTemp *t2, TCGTemp *t3, TCGTemp *t4, TCGTemp *t5)
195
+{
196
+ TCGTemp *args[5] = { t1, t2, t3, t4, t5 };
197
+ tcg_gen_callN(info, ret, args);
198
+}
199
+
200
+void tcg_gen_call6(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1, TCGTemp *t2,
201
+ TCGTemp *t3, TCGTemp *t4, TCGTemp *t5, TCGTemp *t6)
202
+{
203
+ TCGTemp *args[6] = { t1, t2, t3, t4, t5, t6 };
204
+ tcg_gen_callN(info, ret, args);
205
+}
206
+
207
+void tcg_gen_call7(TCGHelperInfo *info, TCGTemp *ret, TCGTemp *t1,
208
+ TCGTemp *t2, TCGTemp *t3, TCGTemp *t4,
209
+ TCGTemp *t5, TCGTemp *t6, TCGTemp *t7)
210
+{
211
+ TCGTemp *args[7] = { t1, t2, t3, t4, t5, t6, t7 };
212
+ tcg_gen_callN(info, ret, args);
213
+}
214
+
215
static void tcg_reg_alloc_start(TCGContext *s)
216
{
217
int i, n;
218
--
219
2.34.1
220
221
diff view generated by jsdifflib
New patch
1
Create helper-gen-common.h without the target specific portion.
2
Use that in tcg-op-common.h. Reorg headers in target/arm to
3
ensure that helper-gen.h is included before helper-info.c.inc.
4
All other targets are already correct in this regard.
1
5
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
MAINTAINERS | 1 +
10
include/exec/helper-gen-common.h | 18 ++++++
11
include/exec/helper-gen.h | 101 ++----------------------------
12
include/tcg/tcg-op-common.h | 2 +-
13
include/exec/helper-gen.h.inc | 102 +++++++++++++++++++++++++++++++
14
target/arm/tcg/translate.c | 8 +--
15
6 files changed, 129 insertions(+), 103 deletions(-)
16
create mode 100644 include/exec/helper-gen-common.h
17
create mode 100644 include/exec/helper-gen.h.inc
18
19
diff --git a/MAINTAINERS b/MAINTAINERS
20
index XXXXXXX..XXXXXXX 100644
21
--- a/MAINTAINERS
22
+++ b/MAINTAINERS
23
@@ -XXX,XX +XXX,XX @@ F: include/exec/exec-all.h
24
F: include/exec/tb-flush.h
25
F: include/exec/target_long.h
26
F: include/exec/helper*.h
27
+F: include/exec/helper*.h.inc
28
F: include/exec/helper-info.c.inc
29
F: include/sysemu/cpus.h
30
F: include/sysemu/tcg.h
31
diff --git a/include/exec/helper-gen-common.h b/include/exec/helper-gen-common.h
32
new file mode 100644
33
index XXXXXXX..XXXXXXX
34
--- /dev/null
35
+++ b/include/exec/helper-gen-common.h
36
@@ -XXX,XX +XXX,XX @@
37
+/* SPDX-License-Identifier: GPL-2.0-or-later */
38
+/*
39
+ * Helper file for declaring TCG helper functions.
40
+ * This one expands generation functions for tcg opcodes.
41
+ */
42
+
43
+#ifndef HELPER_GEN_COMMON_H
44
+#define HELPER_GEN_COMMON_H
45
+
46
+#define HELPER_H "accel/tcg/tcg-runtime.h"
47
+#include "exec/helper-gen.h.inc"
48
+#undef HELPER_H
49
+
50
+#define HELPER_H "accel/tcg/plugin-helpers.h"
51
+#include "exec/helper-gen.h.inc"
52
+#undef HELPER_H
53
+
54
+#endif /* HELPER_GEN_COMMON_H */
55
diff --git a/include/exec/helper-gen.h b/include/exec/helper-gen.h
56
index XXXXXXX..XXXXXXX 100644
57
--- a/include/exec/helper-gen.h
58
+++ b/include/exec/helper-gen.h
59
@@ -XXX,XX +XXX,XX @@
60
/*
61
* Helper file for declaring TCG helper functions.
62
* This one expands generation functions for tcg opcodes.
63
- * Define HELPER_H for the header file to be expanded,
64
- * and static inline to change from global file scope.
65
*/
66
67
#ifndef HELPER_GEN_H
68
#define HELPER_GEN_H
69
70
-#include "tcg/tcg.h"
71
-#include "tcg/helper-info.h"
72
-#include "exec/helper-head.h"
73
+#include "exec/helper-gen-common.h"
74
75
-#define DEF_HELPER_FLAGS_0(name, flags, ret) \
76
-extern TCGHelperInfo glue(helper_info_, name); \
77
-static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
78
-{ \
79
- tcg_gen_call0(&glue(helper_info_, name), dh_retvar(ret)); \
80
-}
81
-
82
-#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
83
-extern TCGHelperInfo glue(helper_info_, name); \
84
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
85
- dh_arg_decl(t1, 1)) \
86
-{ \
87
- tcg_gen_call1(&glue(helper_info_, name), dh_retvar(ret), \
88
- dh_arg(t1, 1)); \
89
-}
90
-
91
-#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
92
-extern TCGHelperInfo glue(helper_info_, name); \
93
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
94
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \
95
-{ \
96
- tcg_gen_call2(&glue(helper_info_, name), dh_retvar(ret), \
97
- dh_arg(t1, 1), dh_arg(t2, 2)); \
98
-}
99
-
100
-#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
101
-extern TCGHelperInfo glue(helper_info_, name); \
102
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
103
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
104
-{ \
105
- tcg_gen_call3(&glue(helper_info_, name), dh_retvar(ret), \
106
- dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3)); \
107
-}
108
-
109
-#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
110
-extern TCGHelperInfo glue(helper_info_, name); \
111
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
112
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \
113
- dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
114
-{ \
115
- tcg_gen_call4(&glue(helper_info_, name), dh_retvar(ret), \
116
- dh_arg(t1, 1), dh_arg(t2, 2), \
117
- dh_arg(t3, 3), dh_arg(t4, 4)); \
118
-}
119
-
120
-#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
121
-extern TCGHelperInfo glue(helper_info_, name); \
122
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
123
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
124
- dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
125
-{ \
126
- tcg_gen_call5(&glue(helper_info_, name), dh_retvar(ret), \
127
- dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
128
- dh_arg(t4, 4), dh_arg(t5, 5)); \
129
-}
130
-
131
-#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
132
-extern TCGHelperInfo glue(helper_info_, name); \
133
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
134
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
135
- dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \
136
-{ \
137
- tcg_gen_call6(&glue(helper_info_, name), dh_retvar(ret), \
138
- dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
139
- dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6)); \
140
-}
141
-
142
-#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7)\
143
-extern TCGHelperInfo glue(helper_info_, name); \
144
-static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
145
- dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
146
- dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6), \
147
- dh_arg_decl(t7, 7)) \
148
-{ \
149
- tcg_gen_call7(&glue(helper_info_, name), dh_retvar(ret), \
150
- dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
151
- dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \
152
- dh_arg(t7, 7)); \
153
-}
154
-
155
-#include "helper.h"
156
-#include "accel/tcg/tcg-runtime.h"
157
-#include "accel/tcg/plugin-helpers.h"
158
-
159
-#undef DEF_HELPER_FLAGS_0
160
-#undef DEF_HELPER_FLAGS_1
161
-#undef DEF_HELPER_FLAGS_2
162
-#undef DEF_HELPER_FLAGS_3
163
-#undef DEF_HELPER_FLAGS_4
164
-#undef DEF_HELPER_FLAGS_5
165
-#undef DEF_HELPER_FLAGS_6
166
-#undef DEF_HELPER_FLAGS_7
167
+#define HELPER_H "helper.h"
168
+#include "exec/helper-gen.h.inc"
169
+#undef HELPER_H
170
171
#endif /* HELPER_GEN_H */
172
diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h
173
index XXXXXXX..XXXXXXX 100644
174
--- a/include/tcg/tcg-op-common.h
175
+++ b/include/tcg/tcg-op-common.h
176
@@ -XXX,XX +XXX,XX @@
177
178
#include "tcg/tcg.h"
179
#include "exec/helper-proto.h"
180
-#include "exec/helper-gen.h"
181
+#include "exec/helper-gen-common.h"
182
183
/* Basic output routines. Not for general consumption. */
184
185
diff --git a/include/exec/helper-gen.h.inc b/include/exec/helper-gen.h.inc
186
new file mode 100644
187
index XXXXXXX..XXXXXXX
188
--- /dev/null
189
+++ b/include/exec/helper-gen.h.inc
190
@@ -XXX,XX +XXX,XX @@
191
+/* SPDX-License-Identifier: GPL-2.0-or-later */
192
+/*
193
+ * Helper file for declaring TCG helper functions.
194
+ * This one expands generation functions for tcg opcodes.
195
+ * Define HELPER_H for the header file to be expanded,
196
+ * and static inline to change from global file scope.
197
+ */
198
+
199
+#include "tcg/tcg.h"
200
+#include "tcg/helper-info.h"
201
+#include "exec/helper-head.h"
202
+
203
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
204
+extern TCGHelperInfo glue(helper_info_, name); \
205
+static inline void glue(gen_helper_, name)(dh_retvar_decl0(ret)) \
206
+{ \
207
+ tcg_gen_call0(&glue(helper_info_, name), dh_retvar(ret)); \
208
+}
209
+
210
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
211
+extern TCGHelperInfo glue(helper_info_, name); \
212
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
213
+ dh_arg_decl(t1, 1)) \
214
+{ \
215
+ tcg_gen_call1(&glue(helper_info_, name), dh_retvar(ret), \
216
+ dh_arg(t1, 1)); \
217
+}
218
+
219
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
220
+extern TCGHelperInfo glue(helper_info_, name); \
221
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
222
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2)) \
223
+{ \
224
+ tcg_gen_call2(&glue(helper_info_, name), dh_retvar(ret), \
225
+ dh_arg(t1, 1), dh_arg(t2, 2)); \
226
+}
227
+
228
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
229
+extern TCGHelperInfo glue(helper_info_, name); \
230
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
231
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3)) \
232
+{ \
233
+ tcg_gen_call3(&glue(helper_info_, name), dh_retvar(ret), \
234
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3)); \
235
+}
236
+
237
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
238
+extern TCGHelperInfo glue(helper_info_, name); \
239
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
240
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), \
241
+ dh_arg_decl(t3, 3), dh_arg_decl(t4, 4)) \
242
+{ \
243
+ tcg_gen_call4(&glue(helper_info_, name), dh_retvar(ret), \
244
+ dh_arg(t1, 1), dh_arg(t2, 2), \
245
+ dh_arg(t3, 3), dh_arg(t4, 4)); \
246
+}
247
+
248
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
249
+extern TCGHelperInfo glue(helper_info_, name); \
250
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
251
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
252
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5)) \
253
+{ \
254
+ tcg_gen_call5(&glue(helper_info_, name), dh_retvar(ret), \
255
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
256
+ dh_arg(t4, 4), dh_arg(t5, 5)); \
257
+}
258
+
259
+#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
260
+extern TCGHelperInfo glue(helper_info_, name); \
261
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
262
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
263
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6)) \
264
+{ \
265
+ tcg_gen_call6(&glue(helper_info_, name), dh_retvar(ret), \
266
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
267
+ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6)); \
268
+}
269
+
270
+#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7)\
271
+extern TCGHelperInfo glue(helper_info_, name); \
272
+static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \
273
+ dh_arg_decl(t1, 1), dh_arg_decl(t2, 2), dh_arg_decl(t3, 3), \
274
+ dh_arg_decl(t4, 4), dh_arg_decl(t5, 5), dh_arg_decl(t6, 6), \
275
+ dh_arg_decl(t7, 7)) \
276
+{ \
277
+ tcg_gen_call7(&glue(helper_info_, name), dh_retvar(ret), \
278
+ dh_arg(t1, 1), dh_arg(t2, 2), dh_arg(t3, 3), \
279
+ dh_arg(t4, 4), dh_arg(t5, 5), dh_arg(t6, 6), \
280
+ dh_arg(t7, 7)); \
281
+}
282
+
283
+#include HELPER_H
284
+
285
+#undef DEF_HELPER_FLAGS_0
286
+#undef DEF_HELPER_FLAGS_1
287
+#undef DEF_HELPER_FLAGS_2
288
+#undef DEF_HELPER_FLAGS_3
289
+#undef DEF_HELPER_FLAGS_4
290
+#undef DEF_HELPER_FLAGS_5
291
+#undef DEF_HELPER_FLAGS_6
292
+#undef DEF_HELPER_FLAGS_7
293
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
294
index XXXXXXX..XXXXXXX 100644
295
--- a/target/arm/tcg/translate.c
296
+++ b/target/arm/tcg/translate.c
297
@@ -XXX,XX +XXX,XX @@
298
#include "semihosting/semihost.h"
299
#include "exec/log.h"
300
#include "cpregs.h"
301
+#include "translate.h"
302
+#include "translate-a32.h"
303
+#include "exec/gen-icount.h"
304
305
#define HELPER_H "helper.h"
306
#include "exec/helper-info.c.inc"
307
@@ -XXX,XX +XXX,XX @@
308
#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
309
#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
310
311
-#include "translate.h"
312
-#include "translate-a32.h"
313
-
314
/* These are TCG temporaries used only by the legacy iwMMXt decoder */
315
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
316
/* These are TCG globals which alias CPUARMState fields */
317
@@ -XXX,XX +XXX,XX @@ TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
318
TCGv_i64 cpu_exclusive_addr;
319
TCGv_i64 cpu_exclusive_val;
320
321
-#include "exec/gen-icount.h"
322
-
323
static const char * const regnames[] =
324
{ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
325
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
326
--
327
2.34.1
328
329
diff view generated by jsdifflib
1
This field is only written, not read; remove it.
1
Create helper-proto-common.h without the target specific portion.
2
Use that in tcg-op-common.h. Include helper-proto.h in target/arm
3
and target/hexagon before helper-info.c.inc; all other targets are
4
already correct in this regard.
2
5
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
---
8
include/hw/core/cpu.h | 1 -
9
include/exec/helper-proto-common.h | 18 ++++++++
9
accel/tcg/cputlb.c | 7 +++----
10
include/exec/helper-proto.h | 73 ++++--------------------------
10
2 files changed, 3 insertions(+), 5 deletions(-)
11
include/tcg/tcg-op-common.h | 2 +-
12
include/exec/helper-proto.h.inc | 68 ++++++++++++++++++++++++++++
13
accel/tcg/cputlb.c | 3 +-
14
accel/tcg/plugin-gen.c | 2 +-
15
accel/tcg/tcg-runtime-gvec.c | 2 +-
16
accel/tcg/tcg-runtime.c | 2 +-
17
target/arm/tcg/translate.c | 1 +
18
target/hexagon/translate.c | 1 +
19
10 files changed, 102 insertions(+), 70 deletions(-)
20
create mode 100644 include/exec/helper-proto-common.h
21
create mode 100644 include/exec/helper-proto.h.inc
11
22
12
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
23
diff --git a/include/exec/helper-proto-common.h b/include/exec/helper-proto-common.h
13
index XXXXXXX..XXXXXXX 100644
24
new file mode 100644
14
--- a/include/hw/core/cpu.h
25
index XXXXXXX..XXXXXXX
15
+++ b/include/hw/core/cpu.h
26
--- /dev/null
16
@@ -XXX,XX +XXX,XX @@ struct CPUWatchpoint {
27
+++ b/include/exec/helper-proto-common.h
17
* the memory regions get moved around by io_writex.
28
@@ -XXX,XX +XXX,XX @@
18
*/
29
+/* SPDX-License-Identifier: GPL-2.0-or-later */
19
typedef struct SavedIOTLB {
30
+/*
20
- hwaddr addr;
31
+ * Helper file for declaring TCG helper functions.
21
MemoryRegionSection *section;
32
+ * This one expands prototypes for the helper functions.
22
hwaddr mr_offset;
33
+ */
23
} SavedIOTLB;
34
+
35
+#ifndef HELPER_PROTO_COMMON_H
36
+#define HELPER_PROTO_COMMON_H
37
+
38
+#define HELPER_H "accel/tcg/tcg-runtime.h"
39
+#include "exec/helper-proto.h.inc"
40
+#undef HELPER_H
41
+
42
+#define HELPER_H "accel/tcg/plugin-helpers.h"
43
+#include "exec/helper-proto.h.inc"
44
+#undef HELPER_H
45
+
46
+#endif /* HELPER_PROTO_COMMON_H */
47
diff --git a/include/exec/helper-proto.h b/include/exec/helper-proto.h
48
index XXXXXXX..XXXXXXX 100644
49
--- a/include/exec/helper-proto.h
50
+++ b/include/exec/helper-proto.h
51
@@ -XXX,XX +XXX,XX @@
52
-/* Helper file for declaring TCG helper functions.
53
- This one expands prototypes for the helper functions. */
54
+/* SPDX-License-Identifier: GPL-2.0-or-later */
55
+/*
56
+ * Helper file for declaring TCG helper functions.
57
+ * This one expands prototypes for the helper functions.
58
+ */
59
60
#ifndef HELPER_PROTO_H
61
#define HELPER_PROTO_H
62
63
-#include "exec/helper-head.h"
64
+#include "exec/helper-proto-common.h"
65
66
-/*
67
- * Work around an issue with --enable-lto, in which GCC's ipa-split pass
68
- * decides to split out the noreturn code paths that raise an exception,
69
- * taking the __builtin_return_address() along into the new function,
70
- * where it no longer computes a value that returns to TCG generated code.
71
- * Despite the name, the noinline attribute affects splitter, so this
72
- * prevents the optimization in question. Given that helpers should not
73
- * otherwise be called directly, this should have any other visible effect.
74
- *
75
- * See https://gitlab.com/qemu-project/qemu/-/issues/1454
76
- */
77
-#define DEF_HELPER_ATTR __attribute__((noinline))
78
-
79
-#define DEF_HELPER_FLAGS_0(name, flags, ret) \
80
-dh_ctype(ret) HELPER(name) (void) DEF_HELPER_ATTR;
81
-
82
-#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
83
-dh_ctype(ret) HELPER(name) (dh_ctype(t1)) DEF_HELPER_ATTR;
84
-
85
-#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
86
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2)) DEF_HELPER_ATTR;
87
-
88
-#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
89
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), \
90
- dh_ctype(t3)) DEF_HELPER_ATTR;
91
-
92
-#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
93
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
94
- dh_ctype(t4)) DEF_HELPER_ATTR;
95
-
96
-#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
97
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
98
- dh_ctype(t4), dh_ctype(t5)) DEF_HELPER_ATTR;
99
-
100
-#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
101
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
102
- dh_ctype(t4), dh_ctype(t5), \
103
- dh_ctype(t6)) DEF_HELPER_ATTR;
104
-
105
-#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7) \
106
-dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
107
- dh_ctype(t4), dh_ctype(t5), dh_ctype(t6), \
108
- dh_ctype(t7)) DEF_HELPER_ATTR;
109
-
110
-#define IN_HELPER_PROTO
111
-
112
-#include "helper.h"
113
-#include "accel/tcg/tcg-runtime.h"
114
-#include "accel/tcg/plugin-helpers.h"
115
-
116
-#undef IN_HELPER_PROTO
117
-
118
-#undef DEF_HELPER_FLAGS_0
119
-#undef DEF_HELPER_FLAGS_1
120
-#undef DEF_HELPER_FLAGS_2
121
-#undef DEF_HELPER_FLAGS_3
122
-#undef DEF_HELPER_FLAGS_4
123
-#undef DEF_HELPER_FLAGS_5
124
-#undef DEF_HELPER_FLAGS_6
125
-#undef DEF_HELPER_FLAGS_7
126
-#undef DEF_HELPER_ATTR
127
+#define HELPER_H "helper.h"
128
+#include "exec/helper-proto.h.inc"
129
+#undef HELPER_H
130
131
#endif /* HELPER_PROTO_H */
132
diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h
133
index XXXXXXX..XXXXXXX 100644
134
--- a/include/tcg/tcg-op-common.h
135
+++ b/include/tcg/tcg-op-common.h
136
@@ -XXX,XX +XXX,XX @@
137
#define TCG_TCG_OP_COMMON_H
138
139
#include "tcg/tcg.h"
140
-#include "exec/helper-proto.h"
141
+#include "exec/helper-proto-common.h"
142
#include "exec/helper-gen-common.h"
143
144
/* Basic output routines. Not for general consumption. */
145
diff --git a/include/exec/helper-proto.h.inc b/include/exec/helper-proto.h.inc
146
new file mode 100644
147
index XXXXXXX..XXXXXXX
148
--- /dev/null
149
+++ b/include/exec/helper-proto.h.inc
150
@@ -XXX,XX +XXX,XX @@
151
+/* SPDX-License-Identifier: GPL-2.0-or-later */
152
+/*
153
+ * Helper file for declaring TCG helper functions.
154
+ * This one expands prototypes for the helper functions.
155
+ * Define HELPER_H for the header file to be expanded.
156
+ */
157
+
158
+#include "exec/helper-head.h"
159
+
160
+/*
161
+ * Work around an issue with --enable-lto, in which GCC's ipa-split pass
162
+ * decides to split out the noreturn code paths that raise an exception,
163
+ * taking the __builtin_return_address() along into the new function,
164
+ * where it no longer computes a value that returns to TCG generated code.
165
+ * Despite the name, the noinline attribute affects splitter, so this
166
+ * prevents the optimization in question. Given that helpers should not
167
+ * otherwise be called directly, this should not have any other visible effect.
168
+ *
169
+ * See https://gitlab.com/qemu-project/qemu/-/issues/1454
170
+ */
171
+#define DEF_HELPER_ATTR __attribute__((noinline))
172
+
173
+#define DEF_HELPER_FLAGS_0(name, flags, ret) \
174
+dh_ctype(ret) HELPER(name) (void) DEF_HELPER_ATTR;
175
+
176
+#define DEF_HELPER_FLAGS_1(name, flags, ret, t1) \
177
+dh_ctype(ret) HELPER(name) (dh_ctype(t1)) DEF_HELPER_ATTR;
178
+
179
+#define DEF_HELPER_FLAGS_2(name, flags, ret, t1, t2) \
180
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2)) DEF_HELPER_ATTR;
181
+
182
+#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
183
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), \
184
+ dh_ctype(t3)) DEF_HELPER_ATTR;
185
+
186
+#define DEF_HELPER_FLAGS_4(name, flags, ret, t1, t2, t3, t4) \
187
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
188
+ dh_ctype(t4)) DEF_HELPER_ATTR;
189
+
190
+#define DEF_HELPER_FLAGS_5(name, flags, ret, t1, t2, t3, t4, t5) \
191
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
192
+ dh_ctype(t4), dh_ctype(t5)) DEF_HELPER_ATTR;
193
+
194
+#define DEF_HELPER_FLAGS_6(name, flags, ret, t1, t2, t3, t4, t5, t6) \
195
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
196
+ dh_ctype(t4), dh_ctype(t5), \
197
+ dh_ctype(t6)) DEF_HELPER_ATTR;
198
+
199
+#define DEF_HELPER_FLAGS_7(name, flags, ret, t1, t2, t3, t4, t5, t6, t7) \
200
+dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \
201
+ dh_ctype(t4), dh_ctype(t5), dh_ctype(t6), \
202
+ dh_ctype(t7)) DEF_HELPER_ATTR;
203
+
204
+#define IN_HELPER_PROTO
205
+
206
+#include HELPER_H
207
+
208
+#undef IN_HELPER_PROTO
209
+
210
+#undef DEF_HELPER_FLAGS_0
211
+#undef DEF_HELPER_FLAGS_1
212
+#undef DEF_HELPER_FLAGS_2
213
+#undef DEF_HELPER_FLAGS_3
214
+#undef DEF_HELPER_FLAGS_4
215
+#undef DEF_HELPER_FLAGS_5
216
+#undef DEF_HELPER_FLAGS_6
217
+#undef DEF_HELPER_FLAGS_7
218
+#undef DEF_HELPER_ATTR
24
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
219
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
25
index XXXXXXX..XXXXXXX 100644
220
index XXXXXXX..XXXXXXX 100644
26
--- a/accel/tcg/cputlb.c
221
--- a/accel/tcg/cputlb.c
27
+++ b/accel/tcg/cputlb.c
222
+++ b/accel/tcg/cputlb.c
28
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
223
@@ -XXX,XX +XXX,XX @@
29
* This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
224
#include "tcg/tcg.h"
30
* because of the side effect of io_writex changing memory layout.
225
#include "qemu/error-report.h"
31
*/
226
#include "exec/log.h"
32
-static void save_iotlb_data(CPUState *cs, hwaddr addr,
227
-#include "exec/helper-proto.h"
33
- MemoryRegionSection *section, hwaddr mr_offset)
228
+#include "exec/helper-proto-common.h"
34
+static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
229
#include "qemu/atomic.h"
35
+ hwaddr mr_offset)
230
#include "qemu/atomic128.h"
36
{
231
#include "exec/translate-all.h"
37
#ifdef CONFIG_PLUGIN
232
@@ -XXX,XX +XXX,XX @@
38
SavedIOTLB *saved = &cs->saved_iotlb;
39
- saved->addr = addr;
40
saved->section = section;
41
saved->mr_offset = mr_offset;
42
#endif
233
#endif
43
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
234
#include "tcg/tcg-ldst.h"
44
* The memory_region_dispatch may trigger a flush/resize
235
#include "tcg/oversized-guest.h"
45
* so for plugins we save the iotlb_data just in case.
236
-#include "exec/helper-proto.h"
46
*/
237
47
- save_iotlb_data(cpu, full->xlat_section, section, mr_offset);
238
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
48
+ save_iotlb_data(cpu, section, mr_offset);
239
/* #define DEBUG_TLB */
49
240
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
50
if (!qemu_mutex_iothread_locked()) {
241
index XXXXXXX..XXXXXXX 100644
51
qemu_mutex_lock_iothread();
242
--- a/accel/tcg/plugin-gen.c
243
+++ b/accel/tcg/plugin-gen.c
244
@@ -XXX,XX +XXX,XX @@
245
#include "exec/exec-all.h"
246
#include "exec/plugin-gen.h"
247
#include "exec/translator.h"
248
-#include "exec/helper-proto.h"
249
+#include "exec/helper-proto-common.h"
250
251
#define HELPER_H "accel/tcg/plugin-helpers.h"
252
#include "exec/helper-info.c.inc"
253
diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c
254
index XXXXXXX..XXXXXXX 100644
255
--- a/accel/tcg/tcg-runtime-gvec.c
256
+++ b/accel/tcg/tcg-runtime-gvec.c
257
@@ -XXX,XX +XXX,XX @@
258
#include "qemu/osdep.h"
259
#include "qemu/host-utils.h"
260
#include "cpu.h"
261
-#include "exec/helper-proto.h"
262
+#include "exec/helper-proto-common.h"
263
#include "tcg/tcg-gvec-desc.h"
264
265
266
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
267
index XXXXXXX..XXXXXXX 100644
268
--- a/accel/tcg/tcg-runtime.c
269
+++ b/accel/tcg/tcg-runtime.c
270
@@ -XXX,XX +XXX,XX @@
271
#include "qemu/osdep.h"
272
#include "qemu/host-utils.h"
273
#include "cpu.h"
274
-#include "exec/helper-proto.h"
275
+#include "exec/helper-proto-common.h"
276
#include "exec/cpu_ldst.h"
277
#include "exec/exec-all.h"
278
#include "disas/disas.h"
279
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
280
index XXXXXXX..XXXXXXX 100644
281
--- a/target/arm/tcg/translate.c
282
+++ b/target/arm/tcg/translate.c
283
@@ -XXX,XX +XXX,XX @@
284
#include "translate.h"
285
#include "translate-a32.h"
286
#include "exec/gen-icount.h"
287
+#include "exec/helper-proto.h"
288
289
#define HELPER_H "helper.h"
290
#include "exec/helper-info.c.inc"
291
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
292
index XXXXXXX..XXXXXXX 100644
293
--- a/target/hexagon/translate.c
294
+++ b/target/hexagon/translate.c
295
@@ -XXX,XX +XXX,XX @@
296
#include "tcg/tcg-op.h"
297
#include "tcg/tcg-op-gvec.h"
298
#include "exec/helper-gen.h"
299
+#include "exec/helper-proto.h"
300
#include "exec/cpu_ldst.h"
301
#include "exec/log.h"
302
#include "internal.h"
52
--
303
--
53
2.34.1
304
2.34.1
54
305
55
306
diff view generated by jsdifflib
1
The value previously chosen overlaps GUSA_MASK.
1
Fixes an assert in tcg_gen_code that we don't accidentally
2
eliminate an insn_start during optimization.
2
3
3
Rename all DELAY_SLOT_* and GUSA_* defines to emphasize
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
that they are included in TB_FLAGs. Add aliases for the
5
FPSCR and SR bits that are included in TB_FLAGS, so that
6
we don't accidentally reassign those bits.
7
8
Fixes: 4da06fb3062 ("target/sh4: Implement prctl_unalign_sigbus")
9
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/856
10
Reviewed-by: Yoshinori Sato <ysato@users.sourceforge.jp>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
6
---
13
target/sh4/cpu.h | 56 +++++++++++++------------
7
target/sh4/translate.c | 15 ++++++++++++---
14
linux-user/sh4/signal.c | 6 +--
8
1 file changed, 12 insertions(+), 3 deletions(-)
15
target/sh4/cpu.c | 6 +--
16
target/sh4/helper.c | 6 +--
17
target/sh4/translate.c | 90 ++++++++++++++++++++++-------------------
18
5 files changed, 88 insertions(+), 76 deletions(-)
19
9
20
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/sh4/cpu.h
23
+++ b/target/sh4/cpu.h
24
@@ -XXX,XX +XXX,XX @@
25
#define FPSCR_RM_NEAREST (0 << 0)
26
#define FPSCR_RM_ZERO (1 << 0)
27
28
-#define DELAY_SLOT_MASK 0x7
29
-#define DELAY_SLOT (1 << 0)
30
-#define DELAY_SLOT_CONDITIONAL (1 << 1)
31
-#define DELAY_SLOT_RTE (1 << 2)
32
+#define TB_FLAG_DELAY_SLOT (1 << 0)
33
+#define TB_FLAG_DELAY_SLOT_COND (1 << 1)
34
+#define TB_FLAG_DELAY_SLOT_RTE (1 << 2)
35
+#define TB_FLAG_PENDING_MOVCA (1 << 3)
36
+#define TB_FLAG_GUSA_SHIFT 4 /* [11:4] */
37
+#define TB_FLAG_GUSA_EXCLUSIVE (1 << 12)
38
+#define TB_FLAG_UNALIGN (1 << 13)
39
+#define TB_FLAG_SR_FD (1 << SR_FD) /* 15 */
40
+#define TB_FLAG_FPSCR_PR FPSCR_PR /* 19 */
41
+#define TB_FLAG_FPSCR_SZ FPSCR_SZ /* 20 */
42
+#define TB_FLAG_FPSCR_FR FPSCR_FR /* 21 */
43
+#define TB_FLAG_SR_RB (1 << SR_RB) /* 29 */
44
+#define TB_FLAG_SR_MD (1 << SR_MD) /* 30 */
45
46
-#define TB_FLAG_PENDING_MOVCA (1 << 3)
47
-#define TB_FLAG_UNALIGN (1 << 4)
48
-
49
-#define GUSA_SHIFT 4
50
-#ifdef CONFIG_USER_ONLY
51
-#define GUSA_EXCLUSIVE (1 << 12)
52
-#define GUSA_MASK ((0xff << GUSA_SHIFT) | GUSA_EXCLUSIVE)
53
-#else
54
-/* Provide dummy versions of the above to allow tests against tbflags
55
- to be elided while avoiding ifdefs. */
56
-#define GUSA_EXCLUSIVE 0
57
-#define GUSA_MASK 0
58
-#endif
59
-
60
-#define TB_FLAG_ENVFLAGS_MASK (DELAY_SLOT_MASK | GUSA_MASK)
61
+#define TB_FLAG_DELAY_SLOT_MASK (TB_FLAG_DELAY_SLOT | \
62
+ TB_FLAG_DELAY_SLOT_COND | \
63
+ TB_FLAG_DELAY_SLOT_RTE)
64
+#define TB_FLAG_GUSA_MASK ((0xff << TB_FLAG_GUSA_SHIFT) | \
65
+ TB_FLAG_GUSA_EXCLUSIVE)
66
+#define TB_FLAG_FPSCR_MASK (TB_FLAG_FPSCR_PR | \
67
+ TB_FLAG_FPSCR_SZ | \
68
+ TB_FLAG_FPSCR_FR)
69
+#define TB_FLAG_SR_MASK (TB_FLAG_SR_FD | \
70
+ TB_FLAG_SR_RB | \
71
+ TB_FLAG_SR_MD)
72
+#define TB_FLAG_ENVFLAGS_MASK (TB_FLAG_DELAY_SLOT_MASK | \
73
+ TB_FLAG_GUSA_MASK)
74
75
typedef struct tlb_t {
76
uint32_t vpn;        /* virtual page number */
77
@@ -XXX,XX +XXX,XX @@ static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
78
{
79
/* The instruction in a RTE delay slot is fetched in privileged
80
mode, but executed in user mode. */
81
- if (ifetch && (env->flags & DELAY_SLOT_RTE)) {
82
+ if (ifetch && (env->flags & TB_FLAG_DELAY_SLOT_RTE)) {
83
return 0;
84
} else {
85
return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
86
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
87
{
88
*pc = env->pc;
89
/* For a gUSA region, notice the end of the region. */
90
- *cs_base = env->flags & GUSA_MASK ? env->gregs[0] : 0;
91
- *flags = env->flags /* TB_FLAG_ENVFLAGS_MASK: bits 0-2, 4-12 */
92
- | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
93
- | (env->sr & ((1u << SR_MD) | (1u << SR_RB))) /* Bits 29-30 */
94
- | (env->sr & (1u << SR_FD)) /* Bit 15 */
95
+ *cs_base = env->flags & TB_FLAG_GUSA_MASK ? env->gregs[0] : 0;
96
+ *flags = env->flags
97
+ | (env->fpscr & TB_FLAG_FPSCR_MASK)
98
+ | (env->sr & TB_FLAG_SR_MASK)
99
| (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */
100
#ifdef CONFIG_USER_ONLY
101
*flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
102
diff --git a/linux-user/sh4/signal.c b/linux-user/sh4/signal.c
103
index XXXXXXX..XXXXXXX 100644
104
--- a/linux-user/sh4/signal.c
105
+++ b/linux-user/sh4/signal.c
106
@@ -XXX,XX +XXX,XX @@ static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
107
__get_user(regs->fpul, &sc->sc_fpul);
108
109
regs->tra = -1; /* disable syscall checks */
110
- regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
111
+ regs->flags = 0;
112
}
113
114
void setup_frame(int sig, struct target_sigaction *ka,
115
@@ -XXX,XX +XXX,XX @@ void setup_frame(int sig, struct target_sigaction *ka,
116
regs->gregs[5] = 0;
117
regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc);
118
regs->pc = (unsigned long) ka->_sa_handler;
119
- regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
120
+ regs->flags &= ~(TB_FLAG_DELAY_SLOT_MASK | TB_FLAG_GUSA_MASK);
121
122
unlock_user_struct(frame, frame_addr, 1);
123
return;
124
@@ -XXX,XX +XXX,XX @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
125
regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
126
regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
127
regs->pc = (unsigned long) ka->_sa_handler;
128
- regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
129
+ regs->flags &= ~(TB_FLAG_DELAY_SLOT_MASK | TB_FLAG_GUSA_MASK);
130
131
unlock_user_struct(frame, frame_addr, 1);
132
return;
133
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
134
index XXXXXXX..XXXXXXX 100644
135
--- a/target/sh4/cpu.c
136
+++ b/target/sh4/cpu.c
137
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_synchronize_from_tb(CPUState *cs,
138
SuperHCPU *cpu = SUPERH_CPU(cs);
139
140
cpu->env.pc = tb_pc(tb);
141
- cpu->env.flags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
142
+ cpu->env.flags = tb->flags;
143
}
144
145
#ifndef CONFIG_USER_ONLY
146
@@ -XXX,XX +XXX,XX @@ static bool superh_io_recompile_replay_branch(CPUState *cs,
147
SuperHCPU *cpu = SUPERH_CPU(cs);
148
CPUSH4State *env = &cpu->env;
149
150
- if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
151
+ if ((env->flags & (TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND))
152
&& env->pc != tb_pc(tb)) {
153
env->pc -= 2;
154
- env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
155
+ env->flags &= ~(TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND);
156
return true;
157
}
158
return false;
159
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
160
index XXXXXXX..XXXXXXX 100644
161
--- a/target/sh4/helper.c
162
+++ b/target/sh4/helper.c
163
@@ -XXX,XX +XXX,XX @@ void superh_cpu_do_interrupt(CPUState *cs)
164
env->sr |= (1u << SR_BL) | (1u << SR_MD) | (1u << SR_RB);
165
env->lock_addr = -1;
166
167
- if (env->flags & DELAY_SLOT_MASK) {
168
+ if (env->flags & TB_FLAG_DELAY_SLOT_MASK) {
169
/* Branch instruction should be executed again before delay slot. */
170
    env->spc -= 2;
171
    /* Clear flags for exception/interrupt routine. */
172
- env->flags &= ~DELAY_SLOT_MASK;
173
+ env->flags &= ~TB_FLAG_DELAY_SLOT_MASK;
174
}
175
176
if (do_exp) {
177
@@ -XXX,XX +XXX,XX @@ bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
178
CPUSH4State *env = &cpu->env;
179
180
/* Delay slots are indivisible, ignore interrupts */
181
- if (env->flags & DELAY_SLOT_MASK) {
182
+ if (env->flags & TB_FLAG_DELAY_SLOT_MASK) {
183
return false;
184
} else {
185
superh_cpu_do_interrupt(cs);
186
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
10
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
187
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
188
--- a/target/sh4/translate.c
12
--- a/target/sh4/translate.c
189
+++ b/target/sh4/translate.c
13
+++ b/target/sh4/translate.c
190
@@ -XXX,XX +XXX,XX @@ void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
191
         i, env->gregs[i], i + 1, env->gregs[i + 1],
192
         i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
193
}
194
- if (env->flags & DELAY_SLOT) {
195
+ if (env->flags & TB_FLAG_DELAY_SLOT) {
196
qemu_printf("in delay slot (delayed_pc=0x%08x)\n",
197
         env->delayed_pc);
198
- } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
199
+ } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) {
200
qemu_printf("in conditional delay slot (delayed_pc=0x%08x)\n",
201
         env->delayed_pc);
202
- } else if (env->flags & DELAY_SLOT_RTE) {
203
+ } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) {
204
qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
205
env->delayed_pc);
206
}
207
@@ -XXX,XX +XXX,XX @@ static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
208
209
static inline bool use_exit_tb(DisasContext *ctx)
210
{
211
- return (ctx->tbflags & GUSA_EXCLUSIVE) != 0;
212
+ return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0;
213
}
214
215
static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
216
@@ -XXX,XX +XXX,XX @@ static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
217
TCGLabel *l1 = gen_new_label();
218
TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;
219
220
- if (ctx->tbflags & GUSA_EXCLUSIVE) {
221
+ if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
222
/* When in an exclusive region, we must continue to the end.
223
Therefore, exit the region on a taken branch, but otherwise
224
fall through to the next instruction. */
225
tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
226
- tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
227
+ tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
228
/* Note that this won't actually use a goto_tb opcode because we
229
disallow it in use_goto_tb, but it handles exit + singlestep. */
230
gen_goto_tb(ctx, 0, dest);
231
@@ -XXX,XX +XXX,XX @@ static void gen_delayed_conditional_jump(DisasContext * ctx)
232
tcg_gen_mov_i32(ds, cpu_delayed_cond);
233
tcg_gen_discard_i32(cpu_delayed_cond);
234
235
- if (ctx->tbflags & GUSA_EXCLUSIVE) {
236
+ if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
237
/* When in an exclusive region, we must continue to the end.
238
Therefore, exit the region on a taken branch, but otherwise
239
fall through to the next instruction. */
240
tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);
241
242
/* Leave the gUSA region. */
243
- tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
244
+ tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
245
gen_jump(ctx);
246
247
gen_set_label(l1);
248
@@ -XXX,XX +XXX,XX @@ static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
249
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
250
251
#define CHECK_NOT_DELAY_SLOT \
252
- if (ctx->envflags & DELAY_SLOT_MASK) { \
253
- goto do_illegal_slot; \
254
+ if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) { \
255
+ goto do_illegal_slot; \
256
}
257
258
#define CHECK_PRIVILEGED \
259
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
260
case 0x000b:        /* rts */
261
    CHECK_NOT_DELAY_SLOT
262
    tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
263
- ctx->envflags |= DELAY_SLOT;
264
+ ctx->envflags |= TB_FLAG_DELAY_SLOT;
265
    ctx->delayed_pc = (uint32_t) - 1;
266
    return;
267
case 0x0028:        /* clrmac */
268
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
269
    CHECK_NOT_DELAY_SLOT
270
gen_write_sr(cpu_ssr);
271
    tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
272
- ctx->envflags |= DELAY_SLOT_RTE;
273
+ ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE;
274
    ctx->delayed_pc = (uint32_t) - 1;
275
ctx->base.is_jmp = DISAS_STOP;
276
    return;
277
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
278
    return;
279
case 0xe000:        /* mov #imm,Rn */
280
#ifdef CONFIG_USER_ONLY
281
- /* Detect the start of a gUSA region. If so, update envflags
282
- and end the TB. This will allow us to see the end of the
283
- region (stored in R0) in the next TB. */
284
+ /*
285
+ * Detect the start of a gUSA region (mov #-n, r15).
286
+ * If so, update envflags and end the TB. This will allow us
287
+ * to see the end of the region (stored in R0) in the next TB.
288
+ */
289
if (B11_8 == 15 && B7_0s < 0 &&
290
(tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
291
- ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
292
+ ctx->envflags =
293
+ deposit32(ctx->envflags, TB_FLAG_GUSA_SHIFT, 8, B7_0s);
294
ctx->base.is_jmp = DISAS_STOP;
295
}
296
#endif
297
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
298
case 0xa000:        /* bra disp */
299
    CHECK_NOT_DELAY_SLOT
300
ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
301
- ctx->envflags |= DELAY_SLOT;
302
+ ctx->envflags |= TB_FLAG_DELAY_SLOT;
303
    return;
304
case 0xb000:        /* bsr disp */
305
    CHECK_NOT_DELAY_SLOT
306
tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
307
ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
308
- ctx->envflags |= DELAY_SLOT;
309
+ ctx->envflags |= TB_FLAG_DELAY_SLOT;
310
    return;
311
}
312
313
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
314
    CHECK_NOT_DELAY_SLOT
315
tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
316
ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
317
- ctx->envflags |= DELAY_SLOT_CONDITIONAL;
318
+ ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
319
    return;
320
case 0x8900:        /* bt label */
321
    CHECK_NOT_DELAY_SLOT
322
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
323
    CHECK_NOT_DELAY_SLOT
324
tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
325
ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
326
- ctx->envflags |= DELAY_SLOT_CONDITIONAL;
327
+ ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
328
    return;
329
case 0x8800:        /* cmp/eq #imm,R0 */
330
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
331
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
332
case 0x0023:        /* braf Rn */
333
    CHECK_NOT_DELAY_SLOT
334
tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
335
- ctx->envflags |= DELAY_SLOT;
336
+ ctx->envflags |= TB_FLAG_DELAY_SLOT;
337
    ctx->delayed_pc = (uint32_t) - 1;
338
    return;
339
case 0x0003:        /* bsrf Rn */
340
    CHECK_NOT_DELAY_SLOT
341
tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
342
    tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
343
- ctx->envflags |= DELAY_SLOT;
344
+ ctx->envflags |= TB_FLAG_DELAY_SLOT;
345
    ctx->delayed_pc = (uint32_t) - 1;
346
    return;
347
case 0x4015:        /* cmp/pl Rn */
348
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
349
case 0x402b:        /* jmp @Rn */
350
    CHECK_NOT_DELAY_SLOT
351
    tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
352
- ctx->envflags |= DELAY_SLOT;
353
+ ctx->envflags |= TB_FLAG_DELAY_SLOT;
354
    ctx->delayed_pc = (uint32_t) - 1;
355
    return;
356
case 0x400b:        /* jsr @Rn */
357
    CHECK_NOT_DELAY_SLOT
358
tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
359
    tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
360
- ctx->envflags |= DELAY_SLOT;
361
+ ctx->envflags |= TB_FLAG_DELAY_SLOT;
362
    ctx->delayed_pc = (uint32_t) - 1;
363
    return;
364
case 0x400e:        /* ldc Rm,SR */
365
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
366
fflush(stderr);
367
#endif
368
do_illegal:
369
- if (ctx->envflags & DELAY_SLOT_MASK) {
370
+ if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
371
do_illegal_slot:
372
gen_save_cpu_state(ctx, true);
373
gen_helper_raise_slot_illegal_instruction(cpu_env);
374
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
375
376
do_fpu_disabled:
377
gen_save_cpu_state(ctx, true);
378
- if (ctx->envflags & DELAY_SLOT_MASK) {
379
+ if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
380
gen_helper_raise_slot_fpu_disable(cpu_env);
381
} else {
382
gen_helper_raise_fpu_disable(cpu_env);
383
@@ -XXX,XX +XXX,XX @@ static void decode_opc(DisasContext * ctx)
384
385
_decode_opc(ctx);
386
387
- if (old_flags & DELAY_SLOT_MASK) {
388
+ if (old_flags & TB_FLAG_DELAY_SLOT_MASK) {
389
/* go out of the delay slot */
390
- ctx->envflags &= ~DELAY_SLOT_MASK;
391
+ ctx->envflags &= ~TB_FLAG_DELAY_SLOT_MASK;
392
393
/* When in an exclusive region, we must continue to the end
394
for conditional branches. */
395
- if (ctx->tbflags & GUSA_EXCLUSIVE
396
- && old_flags & DELAY_SLOT_CONDITIONAL) {
397
+ if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE
398
+ && old_flags & TB_FLAG_DELAY_SLOT_COND) {
399
gen_delayed_conditional_jump(ctx);
400
return;
401
}
402
/* Otherwise this is probably an invalid gUSA region.
403
Drop the GUSA bits so the next TB doesn't see them. */
404
- ctx->envflags &= ~GUSA_MASK;
405
+ ctx->envflags &= ~TB_FLAG_GUSA_MASK;
406
407
tcg_gen_movi_i32(cpu_flags, ctx->envflags);
408
- if (old_flags & DELAY_SLOT_CONDITIONAL) {
409
+ if (old_flags & TB_FLAG_DELAY_SLOT_COND) {
410
     gen_delayed_conditional_jump(ctx);
411
} else {
412
gen_jump(ctx);
413
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
14
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
414
}
415
15
416
/* The entire region has been translated. */
16
/* The entire region has been translated. */
417
- ctx->envflags &= ~GUSA_MASK;
17
ctx->envflags &= ~TB_FLAG_GUSA_MASK;
418
+ ctx->envflags &= ~TB_FLAG_GUSA_MASK;
18
- ctx->base.pc_next = pc_end;
19
- ctx->base.num_insns += max_insns - 1;
20
- return;
21
+ goto done;
22
23
fail:
24
qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
25
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
26
purposes of accounting within the TB. We might as well report the
27
entire region consumed via ctx->base.pc_next so that it's immediately
28
available in the disassembly dump. */
29
+
30
+ done:
419
ctx->base.pc_next = pc_end;
31
ctx->base.pc_next = pc_end;
420
ctx->base.num_insns += max_insns - 1;
32
ctx->base.num_insns += max_insns - 1;
421
return;
33
+
422
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
34
+ /*
423
35
+ * Emit insn_start to cover each of the insns in the region.
424
/* Restart with the EXCLUSIVE bit set, within a TB run via
36
+ * This matches an assert in tcg.c making sure that we have
425
cpu_exec_step_atomic holding the exclusive lock. */
37
+ * tb->icount * insn_start.
426
- ctx->envflags |= GUSA_EXCLUSIVE;
38
+ */
427
+ ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE;
39
+ for (i = 1; i < max_insns; ++i) {
428
gen_save_cpu_state(ctx, false);
40
+ tcg_gen_insn_start(pc + i * 2, ctx->envflags);
429
gen_helper_exclusive(cpu_env);
41
+ }
430
ctx->base.is_jmp = DISAS_NORETURN;
42
}
431
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
43
#endif
432
(tbflags & (1 << SR_RB))) * 0x10;
44
433
ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
434
435
- if (tbflags & GUSA_MASK) {
436
+#ifdef CONFIG_USER_ONLY
437
+ if (tbflags & TB_FLAG_GUSA_MASK) {
438
+ /* In gUSA exclusive region. */
439
uint32_t pc = ctx->base.pc_next;
440
uint32_t pc_end = ctx->base.tb->cs_base;
441
- int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
442
+ int backup = sextract32(ctx->tbflags, TB_FLAG_GUSA_SHIFT, 8);
443
int max_insns = (pc_end - pc) / 2;
444
445
if (pc != pc_end + backup || max_insns < 2) {
446
/* This is a malformed gUSA region. Don't do anything special,
447
since the interpreter is likely to get confused. */
448
- ctx->envflags &= ~GUSA_MASK;
449
- } else if (tbflags & GUSA_EXCLUSIVE) {
450
+ ctx->envflags &= ~TB_FLAG_GUSA_MASK;
451
+ } else if (tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
452
/* Regardless of single-stepping or the end of the page,
453
we must complete execution of the gUSA region while
454
holding the exclusive lock. */
455
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
456
return;
457
}
458
}
459
+#endif
460
461
/* Since the ISA is fixed-width, we can bound by the number
462
of instructions remaining on the page. */
463
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
464
DisasContext *ctx = container_of(dcbase, DisasContext, base);
465
466
#ifdef CONFIG_USER_ONLY
467
- if (unlikely(ctx->envflags & GUSA_MASK)
468
- && !(ctx->envflags & GUSA_EXCLUSIVE)) {
469
+ if (unlikely(ctx->envflags & TB_FLAG_GUSA_MASK)
470
+ && !(ctx->envflags & TB_FLAG_GUSA_EXCLUSIVE)) {
471
/* We're in an gUSA region, and we have not already fallen
472
back on using an exclusive region. Attempt to parse the
473
region into a single supported atomic operation. Failure
474
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
475
{
476
DisasContext *ctx = container_of(dcbase, DisasContext, base);
477
478
- if (ctx->tbflags & GUSA_EXCLUSIVE) {
479
+ if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
480
/* Ending the region of exclusivity. Clear the bits. */
481
- ctx->envflags &= ~GUSA_MASK;
482
+ ctx->envflags &= ~TB_FLAG_GUSA_MASK;
483
}
484
485
switch (ctx->base.is_jmp) {
486
--
45
--
487
2.34.1
46
2.34.1
47
48
diff view generated by jsdifflib
New patch
1
This will enable replacement of TARGET_INSN_START_WORDS in tcg.c.
2
Split out "tcg/insn-start-words.h" and use it in target/.
1
3
4
Reviewed-by: Anton Johansson <anjo@rev.ng>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/tcg/insn-start-words.h | 17 +++++++++++++++++
8
include/tcg/tcg-op.h | 8 ++++----
9
include/tcg/tcg-opc.h | 6 +++---
10
include/tcg/tcg.h | 9 ++-------
11
accel/tcg/perf.c | 8 ++++++--
12
accel/tcg/translate-all.c | 20 +++++++++++++-------
13
target/i386/helper.c | 2 +-
14
target/openrisc/sys_helper.c | 2 +-
15
tcg/tcg.c | 16 +++++++++++-----
16
9 files changed, 58 insertions(+), 30 deletions(-)
17
create mode 100644 include/tcg/insn-start-words.h
18
19
diff --git a/include/tcg/insn-start-words.h b/include/tcg/insn-start-words.h
20
new file mode 100644
21
index XXXXXXX..XXXXXXX
22
--- /dev/null
23
+++ b/include/tcg/insn-start-words.h
24
@@ -XXX,XX +XXX,XX @@
25
+/* SPDX-License-Identifier: MIT */
26
+/*
27
+ * Define TARGET_INSN_START_WORDS
28
+ * Copyright (c) 2008 Fabrice Bellard
29
+ */
30
+
31
+#ifndef TARGET_INSN_START_WORDS
32
+
33
+#include "cpu.h"
34
+
35
+#ifndef TARGET_INSN_START_EXTRA_WORDS
36
+# define TARGET_INSN_START_WORDS 1
37
+#else
38
+# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
39
+#endif
40
+
41
+#endif /* TARGET_INSN_START_WORDS */
42
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
43
index XXXXXXX..XXXXXXX 100644
44
--- a/include/tcg/tcg-op.h
45
+++ b/include/tcg/tcg-op.h
46
@@ -XXX,XX +XXX,XX @@
47
# error
48
#endif
49
50
-#if TARGET_INSN_START_WORDS == 1
51
+#ifndef TARGET_INSN_START_EXTRA_WORDS
52
static inline void tcg_gen_insn_start(target_ulong pc)
53
{
54
TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 64 / TCG_TARGET_REG_BITS);
55
tcg_set_insn_start_param(op, 0, pc);
56
}
57
-#elif TARGET_INSN_START_WORDS == 2
58
+#elif TARGET_INSN_START_EXTRA_WORDS == 1
59
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1)
60
{
61
TCGOp *op = tcg_emit_op(INDEX_op_insn_start, 2 * 64 / TCG_TARGET_REG_BITS);
62
tcg_set_insn_start_param(op, 0, pc);
63
tcg_set_insn_start_param(op, 1, a1);
64
}
65
-#elif TARGET_INSN_START_WORDS == 3
66
+#elif TARGET_INSN_START_EXTRA_WORDS == 2
67
static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1,
68
target_ulong a2)
69
{
70
@@ -XXX,XX +XXX,XX @@ static inline void tcg_gen_insn_start(target_ulong pc, target_ulong a1,
71
tcg_set_insn_start_param(op, 2, a2);
72
}
73
#else
74
-# error "Unhandled number of operands to insn_start"
75
+#error Unhandled TARGET_INSN_START_EXTRA_WORDS value
76
#endif
77
78
#if TARGET_LONG_BITS == 32
79
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
80
index XXXXXXX..XXXXXXX 100644
81
--- a/include/tcg/tcg-opc.h
82
+++ b/include/tcg/tcg-opc.h
83
@@ -XXX,XX +XXX,XX @@ DEF(mulsh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulsh_i64))
84
85
#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)
86
87
-/* QEMU specific */
88
-DEF(insn_start, 0, 0, DATA64_ARGS * TARGET_INSN_START_WORDS,
89
- TCG_OPF_NOT_PRESENT)
90
+/* There are tcg_ctx->insn_start_words here, not just one. */
91
+DEF(insn_start, 0, 0, DATA64_ARGS, TCG_OPF_NOT_PRESENT)
92
+
93
DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
94
DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
95
DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
96
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
97
index XXXXXXX..XXXXXXX 100644
98
--- a/include/tcg/tcg.h
99
+++ b/include/tcg/tcg.h
100
@@ -XXX,XX +XXX,XX @@ typedef uint64_t TCGRegSet;
101
#define TCG_TARGET_HAS_v256 0
102
#endif
103
104
-#ifndef TARGET_INSN_START_EXTRA_WORDS
105
-# define TARGET_INSN_START_WORDS 1
106
-#else
107
-# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
108
-#endif
109
-
110
typedef enum TCGOpcode {
111
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
112
#include "tcg/tcg-opc.h"
113
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
114
uint8_t page_bits;
115
uint8_t tlb_dyn_max_bits;
116
#endif
117
+ uint8_t insn_start_words;
118
119
TCGRegSet reserved_regs;
120
intptr_t current_frame_offset;
121
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
122
TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];
123
124
uint16_t gen_insn_end_off[TCG_MAX_INSNS];
125
- uint64_t gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
126
+ uint64_t *gen_insn_data;
127
128
/* Exit to translator on overflow. */
129
sigjmp_buf jmp_trans;
130
diff --git a/accel/tcg/perf.c b/accel/tcg/perf.c
131
index XXXXXXX..XXXXXXX 100644
132
--- a/accel/tcg/perf.c
133
+++ b/accel/tcg/perf.c
134
@@ -XXX,XX +XXX,XX @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
135
const void *start)
136
{
137
struct debuginfo_query *q;
138
- size_t insn;
139
+ size_t insn, start_words;
140
+ uint64_t *gen_insn_data;
141
142
if (!perfmap && !jitdump) {
143
return;
144
@@ -XXX,XX +XXX,XX @@ void perf_report_code(uint64_t guest_pc, TranslationBlock *tb,
145
debuginfo_lock();
146
147
/* Query debuginfo for each guest instruction. */
148
+ gen_insn_data = tcg_ctx->gen_insn_data;
149
+ start_words = tcg_ctx->insn_start_words;
150
+
151
for (insn = 0; insn < tb->icount; insn++) {
152
/* FIXME: This replicates the restore_state_to_opc() logic. */
153
- q[insn].address = tcg_ctx->gen_insn_data[insn][0];
154
+ q[insn].address = gen_insn_data[insn * start_words + 0];
155
if (tb_cflags(tb) & CF_PCREL) {
156
q[insn].address |= (guest_pc & TARGET_PAGE_MASK);
157
} else {
158
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
159
index XXXXXXX..XXXXXXX 100644
160
--- a/accel/tcg/translate-all.c
161
+++ b/accel/tcg/translate-all.c
162
@@ -XXX,XX +XXX,XX @@
163
#include "tb-context.h"
164
#include "internal.h"
165
#include "perf.h"
166
+#include "tcg/insn-start-words.h"
167
168
TBContext tb_ctx;
169
170
@@ -XXX,XX +XXX,XX @@ static int64_t decode_sleb128(const uint8_t **pp)
171
static int encode_search(TranslationBlock *tb, uint8_t *block)
172
{
173
uint8_t *highwater = tcg_ctx->code_gen_highwater;
174
+ uint64_t *insn_data = tcg_ctx->gen_insn_data;
175
+ uint16_t *insn_end_off = tcg_ctx->gen_insn_end_off;
176
uint8_t *p = block;
177
int i, j, n;
178
179
for (i = 0, n = tb->icount; i < n; ++i) {
180
- uint64_t prev;
181
+ uint64_t prev, curr;
182
183
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
184
if (i == 0) {
185
prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
186
} else {
187
- prev = tcg_ctx->gen_insn_data[i - 1][j];
188
+ prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
189
}
190
- p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
191
+ curr = insn_data[i * TARGET_INSN_START_WORDS + j];
192
+ p = encode_sleb128(p, curr - prev);
193
}
194
- prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
195
- p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
196
+ prev = (i == 0 ? 0 : insn_end_off[i - 1]);
197
+ curr = insn_end_off[i];
198
+ p = encode_sleb128(p, curr - prev);
199
200
/* Test for (pending) buffer overflow. The assumption is that any
201
one row beginning below the high water mark cannot overrun
202
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
203
tcg_ctx->tlb_fast_offset =
204
(int)offsetof(ArchCPU, neg.tlb.f) - (int)offsetof(ArchCPU, env);
205
#endif
206
+ tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
207
208
tb_overflow:
209
210
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
211
fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
212
fprintf(logfile,
213
" -- guest addr 0x%016" PRIx64 " + tb prologue\n",
214
- tcg_ctx->gen_insn_data[insn][0]);
215
+ tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
216
chunk_start = tcg_ctx->gen_insn_end_off[insn];
217
disas(logfile, tb->tc.ptr, chunk_start);
218
219
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
220
size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
221
if (chunk_end > chunk_start) {
222
fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n",
223
- tcg_ctx->gen_insn_data[insn][0]);
224
+ tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
225
disas(logfile, tb->tc.ptr + chunk_start,
226
chunk_end - chunk_start);
227
chunk_start = chunk_end;
228
diff --git a/target/i386/helper.c b/target/i386/helper.c
229
index XXXXXXX..XXXXXXX 100644
230
--- a/target/i386/helper.c
231
+++ b/target/i386/helper.c
232
@@ -XXX,XX +XXX,XX @@
233
#endif
234
#include "qemu/log.h"
235
#ifdef CONFIG_TCG
236
-#include "tcg/tcg.h"
237
+#include "tcg/insn-start-words.h"
238
#endif
239
240
void cpu_sync_avx_hflag(CPUX86State *env)
241
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
242
index XXXXXXX..XXXXXXX 100644
243
--- a/target/openrisc/sys_helper.c
244
+++ b/target/openrisc/sys_helper.c
245
@@ -XXX,XX +XXX,XX @@
246
#ifndef CONFIG_USER_ONLY
247
#include "hw/boards.h"
248
#endif
249
-#include "tcg/tcg.h"
250
+#include "tcg/insn-start-words.h"
251
252
#define TO_SPR(group, number) (((group) << 11) + (number))
253
254
diff --git a/tcg/tcg.c b/tcg/tcg.c
255
index XXXXXXX..XXXXXXX 100644
256
--- a/tcg/tcg.c
257
+++ b/tcg/tcg.c
258
@@ -XXX,XX +XXX,XX @@ void tcg_func_start(TCGContext *s)
259
tcg_debug_assert(s->tlb_fast_offset < 0);
260
tcg_debug_assert(s->tlb_fast_offset >= MIN_TLB_MASK_TABLE_OFS);
261
#endif
262
+
263
+ tcg_debug_assert(s->insn_start_words > 0);
264
}
265
266
static TCGTemp *tcg_temp_alloc(TCGContext *s)
267
@@ -XXX,XX +XXX,XX @@ static void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
268
nb_oargs = 0;
269
col += ne_fprintf(f, "\n ----");
270
271
- for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
272
+ for (i = 0, k = s->insn_start_words; i < k; ++i) {
273
col += ne_fprintf(f, " %016" PRIx64,
274
tcg_get_insn_start_param(op, i));
275
}
276
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
277
#ifdef CONFIG_PROFILER
278
TCGProfile *prof = &s->prof;
279
#endif
280
- int i, num_insns;
281
+ int i, start_words, num_insns;
282
TCGOp *op;
283
284
#ifdef CONFIG_PROFILER
285
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
286
s->pool_labels = NULL;
287
#endif
288
289
+ start_words = s->insn_start_words;
290
+ s->gen_insn_data =
291
+ tcg_malloc(sizeof(uint64_t) * s->gen_tb->icount * start_words);
292
+
293
num_insns = -1;
294
QTAILQ_FOREACH(op, &s->ops, link) {
295
TCGOpcode opc = op->opc;
296
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
297
assert(s->gen_insn_end_off[num_insns] == off);
298
}
299
num_insns++;
300
- for (i = 0; i < TARGET_INSN_START_WORDS; ++i) {
301
- s->gen_insn_data[num_insns][i] =
302
+ for (i = 0; i < start_words; ++i) {
303
+ s->gen_insn_data[num_insns * start_words + i] =
304
tcg_get_insn_start_param(op, i);
305
}
306
break;
307
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
308
return -2;
309
}
310
}
311
- tcg_debug_assert(num_insns >= 0);
312
+ tcg_debug_assert(num_insns + 1 == s->gen_tb->icount);
313
s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
314
315
/* Generate TB finalization at the end of block */
316
--
317
2.34.1
diff view generated by jsdifflib
1
Bool is more appropriate type for the alloc parameter.
1
This replaces of TCG_GUEST_DEFAULT_MO in tcg-op-ldst.c.
2
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
accel/tcg/translate-all.c | 14 +++++++-------
6
include/tcg/tcg.h | 1 +
8
1 file changed, 7 insertions(+), 7 deletions(-)
7
accel/tcg/translate-all.c | 5 +++++
8
tcg/tcg-op-ldst.c | 4 +---
9
3 files changed, 7 insertions(+), 3 deletions(-)
9
10
11
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/include/tcg/tcg.h
14
+++ b/include/tcg/tcg.h
15
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
16
uint8_t tlb_dyn_max_bits;
17
#endif
18
uint8_t insn_start_words;
19
+ TCGBar guest_mo;
20
21
TCGRegSet reserved_regs;
22
intptr_t current_frame_offset;
10
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
23
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
11
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
12
--- a/accel/tcg/translate-all.c
25
--- a/accel/tcg/translate-all.c
13
+++ b/accel/tcg/translate-all.c
26
+++ b/accel/tcg/translate-all.c
14
@@ -XXX,XX +XXX,XX @@ void page_init(void)
27
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
28
(int)offsetof(ArchCPU, neg.tlb.f) - (int)offsetof(ArchCPU, env);
15
#endif
29
#endif
16
}
30
tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
17
31
+#ifdef TCG_GUEST_DEFAULT_MO
18
-static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
32
+ tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO;
19
+static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
33
+#else
34
+ tcg_ctx->guest_mo = TCG_MO_ALL;
35
+#endif
36
37
tb_overflow:
38
39
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/tcg/tcg-op-ldst.c
42
+++ b/tcg/tcg-op-ldst.c
43
@@ -XXX,XX +XXX,XX @@ static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
44
45
static void tcg_gen_req_mo(TCGBar type)
20
{
46
{
21
PageDesc *pd;
47
-#ifdef TCG_GUEST_DEFAULT_MO
22
void **lp;
48
- type &= TCG_GUEST_DEFAULT_MO;
23
@@ -XXX,XX +XXX,XX @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
49
-#endif
24
50
+ type &= tcg_ctx->guest_mo;
25
static inline PageDesc *page_find(tb_page_addr_t index)
51
type &= ~TCG_TARGET_DEFAULT_MO;
26
{
52
if (type) {
27
- return page_find_alloc(index, 0);
53
tcg_gen_mb(type | TCG_BAR_SC);
28
+ return page_find_alloc(index, false);
29
}
30
31
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
32
- PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
33
+ PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc);
34
35
/* In user-mode page locks aren't used; mmap_lock is enough */
36
#ifdef CONFIG_USER_ONLY
37
@@ -XXX,XX +XXX,XX @@ static inline void page_unlock(PageDesc *pd)
38
/* lock the page(s) of a TB in the correct acquisition order */
39
static inline void page_lock_tb(const TranslationBlock *tb)
40
{
41
- page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
42
+ page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], false);
43
}
44
45
static inline void page_unlock_tb(const TranslationBlock *tb)
46
@@ -XXX,XX +XXX,XX @@ void page_collection_unlock(struct page_collection *set)
47
#endif /* !CONFIG_USER_ONLY */
48
49
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
50
- PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
51
+ PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
52
{
53
PageDesc *p1, *p2;
54
tb_page_addr_t page1;
55
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
56
* Note that inserting into the hash table first isn't an option, since
57
* we can only insert TBs that are fully initialized.
58
*/
59
- page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
60
+ page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
61
tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
62
if (p2) {
63
tb_page_add(p2, tb, 1, phys_page2);
64
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
65
for (addr = start, len = end - start;
66
len != 0;
67
len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
68
- PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
69
+ PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, true);
70
71
/* If the write protection bit is set, then we invalidate
72
the code inside. */
73
--
54
--
74
2.34.1
55
2.34.1
75
56
76
57
diff view generated by jsdifflib
New patch
1
The replacement isn't ideal, as the raw count of bits
2
is not easily synced with exec/cpu-all.h, but it does
3
remove from tcg.h the target dependency on TARGET_PAGE_BITS_MIN
4
which is built into TLB_FLAGS_MASK.
1
5
6
Reviewed-by: Anton Johansson <anjo@rev.ng>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
include/exec/cpu-all.h | 3 +++
10
include/tcg/tcg.h | 4 ----
11
tcg/tcg-op-ldst.c | 18 ++++++++++++++++--
12
3 files changed, 19 insertions(+), 6 deletions(-)
13
14
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/cpu-all.h
17
+++ b/include/exec/cpu-all.h
18
@@ -XXX,XX +XXX,XX @@ CPUArchState *cpu_copy(CPUArchState *env);
19
*
20
* Use TARGET_PAGE_BITS_MIN so that these bits are constant
21
* when TARGET_PAGE_BITS_VARY is in effect.
22
+ *
23
+ * The count, if not the placement of these bits is known
24
+ * to tcg/tcg-op-ldst.c, check_max_alignment().
25
*/
26
/* Zero if TLB entry is valid. */
27
#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
28
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
29
index XXXXXXX..XXXXXXX 100644
30
--- a/include/tcg/tcg.h
31
+++ b/include/tcg/tcg.h
32
@@ -XXX,XX +XXX,XX @@ static inline unsigned get_alignment_bits(MemOp memop)
33
/* A specific alignment requirement. */
34
a = a >> MO_ASHIFT;
35
}
36
-#if defined(CONFIG_SOFTMMU)
37
- /* The requested alignment cannot overlap the TLB flags. */
38
- tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
39
-#endif
40
return a;
41
}
42
43
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/tcg/tcg-op-ldst.c
46
+++ b/tcg/tcg-op-ldst.c
47
@@ -XXX,XX +XXX,XX @@
48
#include "tcg-internal.h"
49
50
51
-static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
52
+static void check_max_alignment(unsigned a_bits)
53
+{
54
+#if defined(CONFIG_SOFTMMU)
55
+ /*
56
+ * The requested alignment cannot overlap the TLB flags.
57
+ * FIXME: Must keep the count up-to-date with "exec/cpu-all.h".
58
+ */
59
+ tcg_debug_assert(a_bits + 6 <= tcg_ctx->page_bits);
60
+#endif
61
+}
62
+
63
+static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
64
{
65
- /* Trigger the asserts within as early as possible. */
66
unsigned a_bits = get_alignment_bits(op);
67
68
+ check_max_alignment(a_bits);
69
+
70
/* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */
71
if (a_bits == (op & MO_SIZE)) {
72
op = (op & ~MO_AMASK) | MO_ALIGN;
73
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
74
TCGv_i64 ext_addr = NULL;
75
TCGOpcode opc;
76
77
+ check_max_alignment(get_alignment_bits(memop));
78
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
79
80
/* TODO: For now, force 32-bit hosts to use the helper. */
81
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
82
TCGv_i64 ext_addr = NULL;
83
TCGOpcode opc;
84
85
+ check_max_alignment(get_alignment_bits(memop));
86
tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);
87
88
/* TODO: For now, force 32-bit hosts to use the helper. */
89
--
90
2.34.1
diff view generated by jsdifflib
New patch
1
Create tcg/tcg-op-gvec-common.h, moving everything that does not
2
concern TARGET_LONG_BITS. Adjust tcg-op-gvec.c to use the new header.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/tcg/tcg-op-gvec-common.h | 426 +++++++++++++++++++++++++++++
8
include/tcg/tcg-op-gvec.h | 444 +------------------------------
9
tcg/tcg-op-gvec.c | 2 +-
10
3 files changed, 437 insertions(+), 435 deletions(-)
11
create mode 100644 include/tcg/tcg-op-gvec-common.h
12
13
diff --git a/include/tcg/tcg-op-gvec-common.h b/include/tcg/tcg-op-gvec-common.h
14
new file mode 100644
15
index XXXXXXX..XXXXXXX
16
--- /dev/null
17
+++ b/include/tcg/tcg-op-gvec-common.h
18
@@ -XXX,XX +XXX,XX @@
19
+/* SPDX-License-Identifier: GPL-2.0-or-later */
20
+/*
21
+ * Target independent generic vector operation expansion
22
+ *
23
+ * Copyright (c) 2018 Linaro
24
+ */
25
+
26
+#ifndef TCG_TCG_OP_GVEC_COMMON_H
27
+#define TCG_TCG_OP_GVEC_COMMON_H
28
+
29
+/*
30
+ * "Generic" vectors. All operands are given as offsets from ENV,
31
+ * and therefore cannot also be allocated via tcg_global_mem_new_*.
32
+ * OPRSZ is the byte size of the vector upon which the operation is performed.
33
+ * MAXSZ is the byte size of the full vector; bytes beyond OPSZ are cleared.
34
+ *
35
+ * All sizes must be 8 or any multiple of 16.
36
+ * When OPRSZ is 8, the alignment may be 8, otherwise must be 16.
37
+ * Operands may completely, but not partially, overlap.
38
+ */
39
+
40
+/* Expand a call to a gvec-style helper, with pointers to two vector
41
+ operands, and a descriptor (see tcg-gvec-desc.h). */
42
+typedef void gen_helper_gvec_2(TCGv_ptr, TCGv_ptr, TCGv_i32);
43
+void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
44
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
45
+ gen_helper_gvec_2 *fn);
46
+
47
+/* Similarly, passing an extra data value. */
48
+typedef void gen_helper_gvec_2i(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
49
+void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
50
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
51
+ gen_helper_gvec_2i *fn);
52
+
53
+/* Similarly, passing an extra pointer (e.g. env or float_status). */
54
+typedef void gen_helper_gvec_2_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
55
+void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs,
56
+ TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
57
+ int32_t data, gen_helper_gvec_2_ptr *fn);
58
+
59
+/* Similarly, with three vector operands. */
60
+typedef void gen_helper_gvec_3(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
61
+void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
62
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
63
+ gen_helper_gvec_3 *fn);
64
+
65
+/* Similarly, with four vector operands. */
66
+typedef void gen_helper_gvec_4(TCGv_ptr, TCGv_ptr, TCGv_ptr,
67
+ TCGv_ptr, TCGv_i32);
68
+void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
69
+ uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
70
+ int32_t data, gen_helper_gvec_4 *fn);
71
+
72
+/* Similarly, with five vector operands. */
73
+typedef void gen_helper_gvec_5(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr,
74
+ TCGv_ptr, TCGv_i32);
75
+void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
76
+ uint32_t cofs, uint32_t xofs, uint32_t oprsz,
77
+ uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn);
78
+
79
+typedef void gen_helper_gvec_3_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr,
80
+ TCGv_ptr, TCGv_i32);
81
+void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
82
+ TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
83
+ int32_t data, gen_helper_gvec_3_ptr *fn);
84
+
85
+typedef void gen_helper_gvec_4_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr,
86
+ TCGv_ptr, TCGv_ptr, TCGv_i32);
87
+void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
88
+ uint32_t cofs, TCGv_ptr ptr, uint32_t oprsz,
89
+ uint32_t maxsz, int32_t data,
90
+ gen_helper_gvec_4_ptr *fn);
91
+
92
+typedef void gen_helper_gvec_5_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr,
93
+ TCGv_ptr, TCGv_ptr, TCGv_i32);
94
+void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
95
+ uint32_t cofs, uint32_t eofs, TCGv_ptr ptr,
96
+ uint32_t oprsz, uint32_t maxsz, int32_t data,
97
+ gen_helper_gvec_5_ptr *fn);
98
+
99
+/* Expand a gvec operation. Either inline or out-of-line depending on
100
+ the actual vector size and the operations supported by the host. */
101
+typedef struct {
102
+ /* Expand inline as a 64-bit or 32-bit integer.
103
+ Only one of these will be non-NULL. */
104
+ void (*fni8)(TCGv_i64, TCGv_i64);
105
+ void (*fni4)(TCGv_i32, TCGv_i32);
106
+ /* Expand inline with a host vector type. */
107
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec);
108
+ /* Expand out-of-line helper w/descriptor. */
109
+ gen_helper_gvec_2 *fno;
110
+ /* The optional opcodes, if any, utilized by .fniv. */
111
+ const TCGOpcode *opt_opc;
112
+ /* The data argument to the out-of-line helper. */
113
+ int32_t data;
114
+ /* The vector element size, if applicable. */
115
+ uint8_t vece;
116
+ /* Prefer i64 to v64. */
117
+ bool prefer_i64;
118
+ /* Load dest as a 2nd source operand. */
119
+ bool load_dest;
120
+} GVecGen2;
121
+
122
+typedef struct {
123
+ /* Expand inline as a 64-bit or 32-bit integer.
124
+ Only one of these will be non-NULL. */
125
+ void (*fni8)(TCGv_i64, TCGv_i64, int64_t);
126
+ void (*fni4)(TCGv_i32, TCGv_i32, int32_t);
127
+ /* Expand inline with a host vector type. */
128
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, int64_t);
129
+ /* Expand out-of-line helper w/descriptor, data in descriptor. */
130
+ gen_helper_gvec_2 *fno;
131
+ /* Expand out-of-line helper w/descriptor, data as argument. */
132
+ gen_helper_gvec_2i *fnoi;
133
+ /* The optional opcodes, if any, utilized by .fniv. */
134
+ const TCGOpcode *opt_opc;
135
+ /* The vector element size, if applicable. */
136
+ uint8_t vece;
137
+ /* Prefer i64 to v64. */
138
+ bool prefer_i64;
139
+ /* Load dest as a 3rd source operand. */
140
+ bool load_dest;
141
+} GVecGen2i;
142
+
143
+typedef struct {
144
+ /* Expand inline as a 64-bit or 32-bit integer.
145
+ Only one of these will be non-NULL. */
146
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64);
147
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32);
148
+ /* Expand inline with a host vector type. */
149
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
150
+ /* Expand out-of-line helper w/descriptor. */
151
+ gen_helper_gvec_2i *fno;
152
+ /* The optional opcodes, if any, utilized by .fniv. */
153
+ const TCGOpcode *opt_opc;
154
+ /* The data argument to the out-of-line helper. */
155
+ uint32_t data;
156
+ /* The vector element size, if applicable. */
157
+ uint8_t vece;
158
+ /* Prefer i64 to v64. */
159
+ bool prefer_i64;
160
+ /* Load scalar as 1st source operand. */
161
+ bool scalar_first;
162
+} GVecGen2s;
163
+
164
+typedef struct {
165
+ /* Expand inline as a 64-bit or 32-bit integer.
166
+ Only one of these will be non-NULL. */
167
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64);
168
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32);
169
+ /* Expand inline with a host vector type. */
170
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
171
+ /* Expand out-of-line helper w/descriptor. */
172
+ gen_helper_gvec_3 *fno;
173
+ /* The optional opcodes, if any, utilized by .fniv. */
174
+ const TCGOpcode *opt_opc;
175
+ /* The data argument to the out-of-line helper. */
176
+ int32_t data;
177
+ /* The vector element size, if applicable. */
178
+ uint8_t vece;
179
+ /* Prefer i64 to v64. */
180
+ bool prefer_i64;
181
+ /* Load dest as a 3rd source operand. */
182
+ bool load_dest;
183
+} GVecGen3;
184
+
185
+typedef struct {
186
+ /*
187
+ * Expand inline as a 64-bit or 32-bit integer. Only one of these will be
188
+ * non-NULL.
189
+ */
190
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, int64_t);
191
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, int32_t);
192
+ /* Expand inline with a host vector type. */
193
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, int64_t);
194
+ /* Expand out-of-line helper w/descriptor, data in descriptor. */
195
+ gen_helper_gvec_3 *fno;
196
+ /* The optional opcodes, if any, utilized by .fniv. */
197
+ const TCGOpcode *opt_opc;
198
+ /* The vector element size, if applicable. */
199
+ uint8_t vece;
200
+ /* Prefer i64 to v64. */
201
+ bool prefer_i64;
202
+ /* Load dest as a 3rd source operand. */
203
+ bool load_dest;
204
+} GVecGen3i;
205
+
206
+typedef struct {
207
+ /* Expand inline as a 64-bit or 32-bit integer.
208
+ Only one of these will be non-NULL. */
209
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64);
210
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
211
+ /* Expand inline with a host vector type. */
212
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, TCGv_vec);
213
+ /* Expand out-of-line helper w/descriptor. */
214
+ gen_helper_gvec_4 *fno;
215
+ /* The optional opcodes, if any, utilized by .fniv. */
216
+ const TCGOpcode *opt_opc;
217
+ /* The data argument to the out-of-line helper. */
218
+ int32_t data;
219
+ /* The vector element size, if applicable. */
220
+ uint8_t vece;
221
+ /* Prefer i64 to v64. */
222
+ bool prefer_i64;
223
+ /* Write aofs as a 2nd dest operand. */
224
+ bool write_aofs;
225
+} GVecGen4;
226
+
227
+typedef struct {
228
+ /*
229
+ * Expand inline as a 64-bit or 32-bit integer. Only one of these will be
230
+ * non-NULL.
231
+ */
232
+ void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64, int64_t);
233
+ void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32, int32_t);
234
+ /* Expand inline with a host vector type. */
235
+ void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, TCGv_vec, int64_t);
236
+ /* Expand out-of-line helper w/descriptor, data in descriptor. */
237
+ gen_helper_gvec_4 *fno;
238
+ /* The optional opcodes, if any, utilized by .fniv. */
239
+ const TCGOpcode *opt_opc;
240
+ /* The vector element size, if applicable. */
241
+ uint8_t vece;
242
+ /* Prefer i64 to v64. */
243
+ bool prefer_i64;
244
+} GVecGen4i;
245
+
246
+void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
247
+ uint32_t oprsz, uint32_t maxsz, const GVecGen2 *);
248
+void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
249
+ uint32_t maxsz, int64_t c, const GVecGen2i *);
250
+void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
251
+ uint32_t maxsz, TCGv_i64 c, const GVecGen2s *);
252
+void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
253
+ uint32_t oprsz, uint32_t maxsz, const GVecGen3 *);
254
+void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
255
+ uint32_t oprsz, uint32_t maxsz, int64_t c,
256
+ const GVecGen3i *);
257
+void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
258
+ uint32_t oprsz, uint32_t maxsz, const GVecGen4 *);
259
+void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
260
+ uint32_t oprsz, uint32_t maxsz, int64_t c,
261
+ const GVecGen4i *);
262
+
263
+/* Expand a specific vector operation. */
264
+
265
+void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
266
+ uint32_t oprsz, uint32_t maxsz);
267
+void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
268
+ uint32_t oprsz, uint32_t maxsz);
269
+void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
270
+ uint32_t oprsz, uint32_t maxsz);
271
+void tcg_gen_gvec_abs(unsigned vece, uint32_t dofs, uint32_t aofs,
272
+ uint32_t oprsz, uint32_t maxsz);
273
+
274
+void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
275
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
276
+void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
277
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
278
+void tcg_gen_gvec_mul(unsigned vece, uint32_t dofs, uint32_t aofs,
279
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
280
+
281
+void tcg_gen_gvec_addi(unsigned vece, uint32_t dofs, uint32_t aofs,
282
+ int64_t c, uint32_t oprsz, uint32_t maxsz);
283
+void tcg_gen_gvec_muli(unsigned vece, uint32_t dofs, uint32_t aofs,
284
+ int64_t c, uint32_t oprsz, uint32_t maxsz);
285
+
286
+void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs,
287
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
288
+void tcg_gen_gvec_subs(unsigned vece, uint32_t dofs, uint32_t aofs,
289
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
290
+void tcg_gen_gvec_muls(unsigned vece, uint32_t dofs, uint32_t aofs,
291
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
292
+
293
+/* Saturated arithmetic. */
294
+void tcg_gen_gvec_ssadd(unsigned vece, uint32_t dofs, uint32_t aofs,
295
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
296
+void tcg_gen_gvec_sssub(unsigned vece, uint32_t dofs, uint32_t aofs,
297
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
298
+void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs,
299
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
300
+void tcg_gen_gvec_ussub(unsigned vece, uint32_t dofs, uint32_t aofs,
301
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
302
+
303
+/* Min/max. */
304
+void tcg_gen_gvec_smin(unsigned vece, uint32_t dofs, uint32_t aofs,
305
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
306
+void tcg_gen_gvec_umin(unsigned vece, uint32_t dofs, uint32_t aofs,
307
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
308
+void tcg_gen_gvec_smax(unsigned vece, uint32_t dofs, uint32_t aofs,
309
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
310
+void tcg_gen_gvec_umax(unsigned vece, uint32_t dofs, uint32_t aofs,
311
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
312
+
313
+void tcg_gen_gvec_and(unsigned vece, uint32_t dofs, uint32_t aofs,
314
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
315
+void tcg_gen_gvec_or(unsigned vece, uint32_t dofs, uint32_t aofs,
316
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
317
+void tcg_gen_gvec_xor(unsigned vece, uint32_t dofs, uint32_t aofs,
318
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
319
+void tcg_gen_gvec_andc(unsigned vece, uint32_t dofs, uint32_t aofs,
320
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
321
+void tcg_gen_gvec_orc(unsigned vece, uint32_t dofs, uint32_t aofs,
322
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
323
+void tcg_gen_gvec_nand(unsigned vece, uint32_t dofs, uint32_t aofs,
324
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
325
+void tcg_gen_gvec_nor(unsigned vece, uint32_t dofs, uint32_t aofs,
326
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
327
+void tcg_gen_gvec_eqv(unsigned vece, uint32_t dofs, uint32_t aofs,
328
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
329
+
330
+void tcg_gen_gvec_andi(unsigned vece, uint32_t dofs, uint32_t aofs,
331
+ int64_t c, uint32_t oprsz, uint32_t maxsz);
332
+void tcg_gen_gvec_xori(unsigned vece, uint32_t dofs, uint32_t aofs,
333
+ int64_t c, uint32_t oprsz, uint32_t maxsz);
334
+void tcg_gen_gvec_ori(unsigned vece, uint32_t dofs, uint32_t aofs,
335
+ int64_t c, uint32_t oprsz, uint32_t maxsz);
336
+
337
+void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
338
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
339
+void tcg_gen_gvec_andcs(unsigned vece, uint32_t dofs, uint32_t aofs,
340
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
341
+void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
342
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
343
+void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
344
+ TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
345
+
346
+void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
347
+ uint32_t s, uint32_t m);
348
+void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t s,
349
+ uint32_t m, uint64_t imm);
350
+void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t s,
351
+ uint32_t m, TCGv_i32);
352
+void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t s,
353
+ uint32_t m, TCGv_i64);
354
+
355
+void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
356
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
357
+void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
358
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
359
+void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
360
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
361
+void tcg_gen_gvec_rotli(unsigned vece, uint32_t dofs, uint32_t aofs,
362
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
363
+void tcg_gen_gvec_rotri(unsigned vece, uint32_t dofs, uint32_t aofs,
364
+ int64_t shift, uint32_t oprsz, uint32_t maxsz);
365
+
366
+void tcg_gen_gvec_shls(unsigned vece, uint32_t dofs, uint32_t aofs,
367
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
368
+void tcg_gen_gvec_shrs(unsigned vece, uint32_t dofs, uint32_t aofs,
369
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
370
+void tcg_gen_gvec_sars(unsigned vece, uint32_t dofs, uint32_t aofs,
371
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
372
+void tcg_gen_gvec_rotls(unsigned vece, uint32_t dofs, uint32_t aofs,
373
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
374
+void tcg_gen_gvec_rotrs(unsigned vece, uint32_t dofs, uint32_t aofs,
375
+ TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
376
+
377
+/*
378
+ * Perform vector shift by vector element, modulo the element size.
379
+ * E.g. D[i] = A[i] << (B[i] % (8 << vece)).
380
+ */
381
+void tcg_gen_gvec_shlv(unsigned vece, uint32_t dofs, uint32_t aofs,
382
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
383
+void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
384
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
385
+void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
386
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
387
+void tcg_gen_gvec_rotlv(unsigned vece, uint32_t dofs, uint32_t aofs,
388
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
389
+void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
390
+ uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
391
+
392
+void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
393
+ uint32_t aofs, uint32_t bofs,
394
+ uint32_t oprsz, uint32_t maxsz);
395
+
396
+/*
397
+ * Perform vector bit select: d = (b & a) | (c & ~a).
398
+ */
399
+void tcg_gen_gvec_bitsel(unsigned vece, uint32_t dofs, uint32_t aofs,
400
+ uint32_t bofs, uint32_t cofs,
401
+ uint32_t oprsz, uint32_t maxsz);
402
+
403
+/*
404
+ * 64-bit vector operations. Use these when the register has been allocated
405
+ * with tcg_global_mem_new_i64, and so we cannot also address it via pointer.
406
+ * OPRSZ = MAXSZ = 8.
407
+ */
408
+
409
+void tcg_gen_vec_neg8_i64(TCGv_i64 d, TCGv_i64 a);
410
+void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 a);
411
+void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 a);
412
+
413
+void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
414
+void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
415
+void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
416
+
417
+void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
418
+void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
419
+void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
420
+
421
+void tcg_gen_vec_shl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
422
+void tcg_gen_vec_shl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
423
+void tcg_gen_vec_shr8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
424
+void tcg_gen_vec_shr16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
425
+void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
426
+void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
427
+void tcg_gen_vec_rotl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
428
+void tcg_gen_vec_rotl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
429
+
430
+/* 32-bit vector operations. */
431
+void tcg_gen_vec_add8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
432
+void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
433
+
434
+void tcg_gen_vec_sub8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
435
+void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
436
+
437
+void tcg_gen_vec_shl8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
438
+void tcg_gen_vec_shl16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
439
+void tcg_gen_vec_shr8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
440
+void tcg_gen_vec_shr16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
441
+void tcg_gen_vec_sar8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
442
+void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
443
+
444
+#endif
445
diff --git a/include/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h
446
index XXXXXXX..XXXXXXX 100644
447
--- a/include/tcg/tcg-op-gvec.h
448
+++ b/include/tcg/tcg-op-gvec.h
449
@@ -XXX,XX +XXX,XX @@
450
+/* SPDX-License-Identifier: GPL-2.0-or-later */
451
/*
452
- * Generic vector operation expansion
453
+ * Target dependent generic vector operation expansion
454
*
455
* Copyright (c) 2018 Linaro
456
- *
457
- * This library is free software; you can redistribute it and/or
458
- * modify it under the terms of the GNU Lesser General Public
459
- * License as published by the Free Software Foundation; either
460
- * version 2.1 of the License, or (at your option) any later version.
461
- *
462
- * This library is distributed in the hope that it will be useful,
463
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
464
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
465
- * Lesser General Public License for more details.
466
- *
467
- * You should have received a copy of the GNU Lesser General Public
468
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
469
*/
470
471
#ifndef TCG_TCG_OP_GVEC_H
472
#define TCG_TCG_OP_GVEC_H
473
474
-/*
475
- * "Generic" vectors. All operands are given as offsets from ENV,
476
- * and therefore cannot also be allocated via tcg_global_mem_new_*.
477
- * OPRSZ is the byte size of the vector upon which the operation is performed.
478
- * MAXSZ is the byte size of the full vector; bytes beyond OPSZ are cleared.
479
- *
480
- * All sizes must be 8 or any multiple of 16.
481
- * When OPRSZ is 8, the alignment may be 8, otherwise must be 16.
482
- * Operands may completely, but not partially, overlap.
483
- */
484
+#include "tcg/tcg-op-gvec-common.h"
485
486
-/* Expand a call to a gvec-style helper, with pointers to two vector
487
- operands, and a descriptor (see tcg-gvec-desc.h). */
488
-typedef void gen_helper_gvec_2(TCGv_ptr, TCGv_ptr, TCGv_i32);
489
-void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
490
- uint32_t oprsz, uint32_t maxsz, int32_t data,
491
- gen_helper_gvec_2 *fn);
492
-
493
-/* Similarly, passing an extra data value. */
494
-typedef void gen_helper_gvec_2i(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
495
-void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
496
- uint32_t oprsz, uint32_t maxsz, int32_t data,
497
- gen_helper_gvec_2i *fn);
498
-
499
-/* Similarly, passing an extra pointer (e.g. env or float_status). */
500
-typedef void gen_helper_gvec_2_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
501
-void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs,
502
- TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
503
- int32_t data, gen_helper_gvec_2_ptr *fn);
504
-
505
-/* Similarly, with three vector operands. */
506
-typedef void gen_helper_gvec_3(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
507
-void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
508
- uint32_t oprsz, uint32_t maxsz, int32_t data,
509
- gen_helper_gvec_3 *fn);
510
-
511
-/* Similarly, with four vector operands. */
512
-typedef void gen_helper_gvec_4(TCGv_ptr, TCGv_ptr, TCGv_ptr,
513
- TCGv_ptr, TCGv_i32);
514
-void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
515
- uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
516
- int32_t data, gen_helper_gvec_4 *fn);
517
-
518
-/* Similarly, with five vector operands. */
519
-typedef void gen_helper_gvec_5(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr,
520
- TCGv_ptr, TCGv_i32);
521
-void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
522
- uint32_t cofs, uint32_t xofs, uint32_t oprsz,
523
- uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn);
524
-
525
-typedef void gen_helper_gvec_3_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr,
526
- TCGv_ptr, TCGv_i32);
527
-void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
528
- TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
529
- int32_t data, gen_helper_gvec_3_ptr *fn);
530
-
531
-typedef void gen_helper_gvec_4_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr,
532
- TCGv_ptr, TCGv_ptr, TCGv_i32);
533
-void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
534
- uint32_t cofs, TCGv_ptr ptr, uint32_t oprsz,
535
- uint32_t maxsz, int32_t data,
536
- gen_helper_gvec_4_ptr *fn);
537
-
538
-typedef void gen_helper_gvec_5_ptr(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr,
539
- TCGv_ptr, TCGv_ptr, TCGv_i32);
540
-void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
541
- uint32_t cofs, uint32_t eofs, TCGv_ptr ptr,
542
- uint32_t oprsz, uint32_t maxsz, int32_t data,
543
- gen_helper_gvec_5_ptr *fn);
544
-
545
-/* Expand a gvec operation. Either inline or out-of-line depending on
546
- the actual vector size and the operations supported by the host. */
547
-typedef struct {
548
- /* Expand inline as a 64-bit or 32-bit integer.
549
- Only one of these will be non-NULL. */
550
- void (*fni8)(TCGv_i64, TCGv_i64);
551
- void (*fni4)(TCGv_i32, TCGv_i32);
552
- /* Expand inline with a host vector type. */
553
- void (*fniv)(unsigned, TCGv_vec, TCGv_vec);
554
- /* Expand out-of-line helper w/descriptor. */
555
- gen_helper_gvec_2 *fno;
556
- /* The optional opcodes, if any, utilized by .fniv. */
557
- const TCGOpcode *opt_opc;
558
- /* The data argument to the out-of-line helper. */
559
- int32_t data;
560
- /* The vector element size, if applicable. */
561
- uint8_t vece;
562
- /* Prefer i64 to v64. */
563
- bool prefer_i64;
564
- /* Load dest as a 2nd source operand. */
565
- bool load_dest;
566
-} GVecGen2;
567
-
568
-typedef struct {
569
- /* Expand inline as a 64-bit or 32-bit integer.
570
- Only one of these will be non-NULL. */
571
- void (*fni8)(TCGv_i64, TCGv_i64, int64_t);
572
- void (*fni4)(TCGv_i32, TCGv_i32, int32_t);
573
- /* Expand inline with a host vector type. */
574
- void (*fniv)(unsigned, TCGv_vec, TCGv_vec, int64_t);
575
- /* Expand out-of-line helper w/descriptor, data in descriptor. */
576
- gen_helper_gvec_2 *fno;
577
- /* Expand out-of-line helper w/descriptor, data as argument. */
578
- gen_helper_gvec_2i *fnoi;
579
- /* The optional opcodes, if any, utilized by .fniv. */
580
- const TCGOpcode *opt_opc;
581
- /* The vector element size, if applicable. */
582
- uint8_t vece;
583
- /* Prefer i64 to v64. */
584
- bool prefer_i64;
585
- /* Load dest as a 3rd source operand. */
586
- bool load_dest;
587
-} GVecGen2i;
588
-
589
-typedef struct {
590
- /* Expand inline as a 64-bit or 32-bit integer.
591
- Only one of these will be non-NULL. */
592
- void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64);
593
- void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32);
594
- /* Expand inline with a host vector type. */
595
- void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
596
- /* Expand out-of-line helper w/descriptor. */
597
- gen_helper_gvec_2i *fno;
598
- /* The optional opcodes, if any, utilized by .fniv. */
599
- const TCGOpcode *opt_opc;
600
- /* The data argument to the out-of-line helper. */
601
- uint32_t data;
602
- /* The vector element size, if applicable. */
603
- uint8_t vece;
604
- /* Prefer i64 to v64. */
605
- bool prefer_i64;
606
- /* Load scalar as 1st source operand. */
607
- bool scalar_first;
608
-} GVecGen2s;
609
-
610
-typedef struct {
611
- /* Expand inline as a 64-bit or 32-bit integer.
612
- Only one of these will be non-NULL. */
613
- void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64);
614
- void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32);
615
- /* Expand inline with a host vector type. */
616
- void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
617
- /* Expand out-of-line helper w/descriptor. */
618
- gen_helper_gvec_3 *fno;
619
- /* The optional opcodes, if any, utilized by .fniv. */
620
- const TCGOpcode *opt_opc;
621
- /* The data argument to the out-of-line helper. */
622
- int32_t data;
623
- /* The vector element size, if applicable. */
624
- uint8_t vece;
625
- /* Prefer i64 to v64. */
626
- bool prefer_i64;
627
- /* Load dest as a 3rd source operand. */
628
- bool load_dest;
629
-} GVecGen3;
630
-
631
-typedef struct {
632
- /*
633
- * Expand inline as a 64-bit or 32-bit integer. Only one of these will be
634
- * non-NULL.
635
- */
636
- void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, int64_t);
637
- void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, int32_t);
638
- /* Expand inline with a host vector type. */
639
- void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, int64_t);
640
- /* Expand out-of-line helper w/descriptor, data in descriptor. */
641
- gen_helper_gvec_3 *fno;
642
- /* The optional opcodes, if any, utilized by .fniv. */
643
- const TCGOpcode *opt_opc;
644
- /* The vector element size, if applicable. */
645
- uint8_t vece;
646
- /* Prefer i64 to v64. */
647
- bool prefer_i64;
648
- /* Load dest as a 3rd source operand. */
649
- bool load_dest;
650
-} GVecGen3i;
651
-
652
-typedef struct {
653
- /* Expand inline as a 64-bit or 32-bit integer.
654
- Only one of these will be non-NULL. */
655
- void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64);
656
- void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
657
- /* Expand inline with a host vector type. */
658
- void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, TCGv_vec);
659
- /* Expand out-of-line helper w/descriptor. */
660
- gen_helper_gvec_4 *fno;
661
- /* The optional opcodes, if any, utilized by .fniv. */
662
- const TCGOpcode *opt_opc;
663
- /* The data argument to the out-of-line helper. */
664
- int32_t data;
665
- /* The vector element size, if applicable. */
666
- uint8_t vece;
667
- /* Prefer i64 to v64. */
668
- bool prefer_i64;
669
- /* Write aofs as a 2nd dest operand. */
670
- bool write_aofs;
671
-} GVecGen4;
672
-
673
-typedef struct {
674
- /*
675
- * Expand inline as a 64-bit or 32-bit integer. Only one of these will be
676
- * non-NULL.
677
- */
678
- void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64, int64_t);
679
- void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32, int32_t);
680
- /* Expand inline with a host vector type. */
681
- void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, TCGv_vec, int64_t);
682
- /* Expand out-of-line helper w/descriptor, data in descriptor. */
683
- gen_helper_gvec_4 *fno;
684
- /* The optional opcodes, if any, utilized by .fniv. */
685
- const TCGOpcode *opt_opc;
686
- /* The vector element size, if applicable. */
687
- uint8_t vece;
688
- /* Prefer i64 to v64. */
689
- bool prefer_i64;
690
-} GVecGen4i;
691
-
692
-void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
693
- uint32_t oprsz, uint32_t maxsz, const GVecGen2 *);
694
-void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
695
- uint32_t maxsz, int64_t c, const GVecGen2i *);
696
-void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
697
- uint32_t maxsz, TCGv_i64 c, const GVecGen2s *);
698
-void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
699
- uint32_t oprsz, uint32_t maxsz, const GVecGen3 *);
700
-void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
701
- uint32_t oprsz, uint32_t maxsz, int64_t c,
702
- const GVecGen3i *);
703
-void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
704
- uint32_t oprsz, uint32_t maxsz, const GVecGen4 *);
705
-void tcg_gen_gvec_4i(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
706
- uint32_t oprsz, uint32_t maxsz, int64_t c,
707
- const GVecGen4i *);
708
-
709
-/* Expand a specific vector operation. */
710
-
711
-void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
712
- uint32_t oprsz, uint32_t maxsz);
713
-void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
714
- uint32_t oprsz, uint32_t maxsz);
715
-void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
716
- uint32_t oprsz, uint32_t maxsz);
717
-void tcg_gen_gvec_abs(unsigned vece, uint32_t dofs, uint32_t aofs,
718
- uint32_t oprsz, uint32_t maxsz);
719
-
720
-void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
721
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
722
-void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
723
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
724
-void tcg_gen_gvec_mul(unsigned vece, uint32_t dofs, uint32_t aofs,
725
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
726
-
727
-void tcg_gen_gvec_addi(unsigned vece, uint32_t dofs, uint32_t aofs,
728
- int64_t c, uint32_t oprsz, uint32_t maxsz);
729
-void tcg_gen_gvec_muli(unsigned vece, uint32_t dofs, uint32_t aofs,
730
- int64_t c, uint32_t oprsz, uint32_t maxsz);
731
-
732
-void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs,
733
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
734
-void tcg_gen_gvec_subs(unsigned vece, uint32_t dofs, uint32_t aofs,
735
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
736
-void tcg_gen_gvec_muls(unsigned vece, uint32_t dofs, uint32_t aofs,
737
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
738
-
739
-/* Saturated arithmetic. */
740
-void tcg_gen_gvec_ssadd(unsigned vece, uint32_t dofs, uint32_t aofs,
741
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
742
-void tcg_gen_gvec_sssub(unsigned vece, uint32_t dofs, uint32_t aofs,
743
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
744
-void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs,
745
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
746
-void tcg_gen_gvec_ussub(unsigned vece, uint32_t dofs, uint32_t aofs,
747
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
748
-
749
-/* Min/max. */
750
-void tcg_gen_gvec_smin(unsigned vece, uint32_t dofs, uint32_t aofs,
751
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
752
-void tcg_gen_gvec_umin(unsigned vece, uint32_t dofs, uint32_t aofs,
753
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
754
-void tcg_gen_gvec_smax(unsigned vece, uint32_t dofs, uint32_t aofs,
755
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
756
-void tcg_gen_gvec_umax(unsigned vece, uint32_t dofs, uint32_t aofs,
757
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
758
-
759
-void tcg_gen_gvec_and(unsigned vece, uint32_t dofs, uint32_t aofs,
760
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
761
-void tcg_gen_gvec_or(unsigned vece, uint32_t dofs, uint32_t aofs,
762
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
763
-void tcg_gen_gvec_xor(unsigned vece, uint32_t dofs, uint32_t aofs,
764
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
765
-void tcg_gen_gvec_andc(unsigned vece, uint32_t dofs, uint32_t aofs,
766
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
767
-void tcg_gen_gvec_orc(unsigned vece, uint32_t dofs, uint32_t aofs,
768
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
769
-void tcg_gen_gvec_nand(unsigned vece, uint32_t dofs, uint32_t aofs,
770
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
771
-void tcg_gen_gvec_nor(unsigned vece, uint32_t dofs, uint32_t aofs,
772
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
773
-void tcg_gen_gvec_eqv(unsigned vece, uint32_t dofs, uint32_t aofs,
774
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
775
-
776
-void tcg_gen_gvec_andi(unsigned vece, uint32_t dofs, uint32_t aofs,
777
- int64_t c, uint32_t oprsz, uint32_t maxsz);
778
-void tcg_gen_gvec_xori(unsigned vece, uint32_t dofs, uint32_t aofs,
779
- int64_t c, uint32_t oprsz, uint32_t maxsz);
780
-void tcg_gen_gvec_ori(unsigned vece, uint32_t dofs, uint32_t aofs,
781
- int64_t c, uint32_t oprsz, uint32_t maxsz);
782
-
783
-void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
784
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
785
-void tcg_gen_gvec_andcs(unsigned vece, uint32_t dofs, uint32_t aofs,
786
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
787
-void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
788
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
789
-void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
790
- TCGv_i64 c, uint32_t oprsz, uint32_t maxsz);
791
-
792
-void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
793
- uint32_t s, uint32_t m);
794
-void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t s,
795
- uint32_t m, uint64_t imm);
796
-void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t s,
797
- uint32_t m, TCGv_i32);
798
-void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t s,
799
- uint32_t m, TCGv_i64);
800
-
801
-#if TARGET_LONG_BITS == 64
802
-# define tcg_gen_gvec_dup_tl tcg_gen_gvec_dup_i64
803
-#else
804
-# define tcg_gen_gvec_dup_tl tcg_gen_gvec_dup_i32
805
+#ifndef TARGET_LONG_BITS
806
+#error must include QEMU headers
807
#endif
808
809
-void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
810
- int64_t shift, uint32_t oprsz, uint32_t maxsz);
811
-void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
812
- int64_t shift, uint32_t oprsz, uint32_t maxsz);
813
-void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
814
- int64_t shift, uint32_t oprsz, uint32_t maxsz);
815
-void tcg_gen_gvec_rotli(unsigned vece, uint32_t dofs, uint32_t aofs,
816
- int64_t shift, uint32_t oprsz, uint32_t maxsz);
817
-void tcg_gen_gvec_rotri(unsigned vece, uint32_t dofs, uint32_t aofs,
818
- int64_t shift, uint32_t oprsz, uint32_t maxsz);
819
-
820
-void tcg_gen_gvec_shls(unsigned vece, uint32_t dofs, uint32_t aofs,
821
- TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
822
-void tcg_gen_gvec_shrs(unsigned vece, uint32_t dofs, uint32_t aofs,
823
- TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
824
-void tcg_gen_gvec_sars(unsigned vece, uint32_t dofs, uint32_t aofs,
825
- TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
826
-void tcg_gen_gvec_rotls(unsigned vece, uint32_t dofs, uint32_t aofs,
827
- TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
828
-void tcg_gen_gvec_rotrs(unsigned vece, uint32_t dofs, uint32_t aofs,
829
- TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz);
830
-
831
-/*
832
- * Perform vector shift by vector element, modulo the element size.
833
- * E.g. D[i] = A[i] << (B[i] % (8 << vece)).
834
- */
835
-void tcg_gen_gvec_shlv(unsigned vece, uint32_t dofs, uint32_t aofs,
836
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
837
-void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
838
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
839
-void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
840
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
841
-void tcg_gen_gvec_rotlv(unsigned vece, uint32_t dofs, uint32_t aofs,
842
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
843
-void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
844
- uint32_t bofs, uint32_t oprsz, uint32_t maxsz);
845
-
846
-void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
847
- uint32_t aofs, uint32_t bofs,
848
- uint32_t oprsz, uint32_t maxsz);
849
-
850
-/*
851
- * Perform vector bit select: d = (b & a) | (c & ~a).
852
- */
853
-void tcg_gen_gvec_bitsel(unsigned vece, uint32_t dofs, uint32_t aofs,
854
- uint32_t bofs, uint32_t cofs,
855
- uint32_t oprsz, uint32_t maxsz);
856
-
857
-/*
858
- * 64-bit vector operations. Use these when the register has been allocated
859
- * with tcg_global_mem_new_i64, and so we cannot also address it via pointer.
860
- * OPRSZ = MAXSZ = 8.
861
- */
862
-
863
-void tcg_gen_vec_neg8_i64(TCGv_i64 d, TCGv_i64 a);
864
-void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 a);
865
-void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 a);
866
-
867
-void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
868
-void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
869
-void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
870
-
871
-void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
872
-void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
873
-void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
874
-
875
-void tcg_gen_vec_shl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
876
-void tcg_gen_vec_shl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
877
-void tcg_gen_vec_shr8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
878
-void tcg_gen_vec_shr16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
879
-void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
880
-void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t);
881
-void tcg_gen_vec_rotl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
882
-void tcg_gen_vec_rotl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
883
-
884
-/* 32-bit vector operations. */
885
-void tcg_gen_vec_add8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
886
-void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
887
-
888
-void tcg_gen_vec_sub8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
889
-void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
890
-
891
-void tcg_gen_vec_shl8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
892
-void tcg_gen_vec_shl16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
893
-void tcg_gen_vec_shr8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
894
-void tcg_gen_vec_shr16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
895
-void tcg_gen_vec_sar8i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
896
-void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
897
-
898
#if TARGET_LONG_BITS == 64
899
+#define tcg_gen_gvec_dup_tl tcg_gen_gvec_dup_i64
900
#define tcg_gen_vec_add8_tl tcg_gen_vec_add8_i64
901
#define tcg_gen_vec_sub8_tl tcg_gen_vec_sub8_i64
902
#define tcg_gen_vec_add16_tl tcg_gen_vec_add16_i64
903
@@ -XXX,XX +XXX,XX @@ void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
904
#define tcg_gen_vec_shl16i_tl tcg_gen_vec_shl16i_i64
905
#define tcg_gen_vec_shr16i_tl tcg_gen_vec_shr16i_i64
906
#define tcg_gen_vec_sar16i_tl tcg_gen_vec_sar16i_i64
907
-
908
-#else
909
+#elif TARGET_LONG_BITS == 32
910
+#define tcg_gen_gvec_dup_tl tcg_gen_gvec_dup_i32
911
#define tcg_gen_vec_add8_tl tcg_gen_vec_add8_i32
912
#define tcg_gen_vec_sub8_tl tcg_gen_vec_sub8_i32
913
#define tcg_gen_vec_add16_tl tcg_gen_vec_add16_i32
914
@@ -XXX,XX +XXX,XX @@ void tcg_gen_vec_sar16i_i32(TCGv_i32 d, TCGv_i32 a, int32_t);
915
#define tcg_gen_vec_shl16i_tl tcg_gen_vec_shl16i_i32
916
#define tcg_gen_vec_shr16i_tl tcg_gen_vec_shr16i_i32
917
#define tcg_gen_vec_sar16i_tl tcg_gen_vec_sar16i_i32
918
+#else
919
+# error
920
#endif
921
922
#endif
923
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
924
index XXXXXXX..XXXXXXX 100644
925
--- a/tcg/tcg-op-gvec.c
926
+++ b/tcg/tcg-op-gvec.c
927
@@ -XXX,XX +XXX,XX @@
928
#include "tcg/tcg.h"
929
#include "tcg/tcg-temp-internal.h"
930
#include "tcg/tcg-op-common.h"
931
-#include "tcg/tcg-op-gvec.h"
932
+#include "tcg/tcg-op-gvec-common.h"
933
#include "tcg/tcg-gvec-desc.h"
934
935
#define MAX_UNROLL 4
936
--
937
2.34.1
938
939
diff view generated by jsdifflib
1
From: Alex Bennée <alex.bennee@linaro.org>
1
From this remove, it's no longer clear what this is attempting
2
to protect. The last time a use of this define was added to
3
the source tree, as opposed to merely moved around, was 2008.
4
There have been many cleanups since that time and this is
5
no longer required for the build to succeed.
2
6
3
Before: 35.912 s ± 0.168 s
7
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
After: 35.565 s ± 0.087 s
5
6
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-Id: <20220811151413.3350684-5-alex.bennee@linaro.org>
9
Signed-off-by: Cédric Le Goater <clg@kaod.org>
10
Message-Id: <20220923084803.498337-5-clg@kaod.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
9
---
13
accel/tcg/cputlb.c | 15 ++++++---------
10
target/ppc/cpu.h | 2 --
14
1 file changed, 6 insertions(+), 9 deletions(-)
11
target/sparc/cpu.h | 2 --
12
accel/tcg/translate-all.c | 1 -
13
tcg/tcg.c | 6 ------
14
4 files changed, 11 deletions(-)
15
15
16
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
16
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
17
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
18
--- a/accel/tcg/cputlb.c
18
--- a/target/ppc/cpu.h
19
+++ b/accel/tcg/cputlb.c
19
+++ b/target/ppc/cpu.h
20
@@ -XXX,XX +XXX,XX @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
20
@@ -XXX,XX +XXX,XX @@ void ppc_store_msr(CPUPPCState *env, target_ulong value);
21
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
21
void ppc_cpu_list(void);
22
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
22
23
{
23
/* Time-base and decrementer management */
24
- CPUClass *cc = CPU_GET_CLASS(cpu);
24
-#ifndef NO_CPU_IO_DEFS
25
bool ok;
25
uint64_t cpu_ppc_load_tbl(CPUPPCState *env);
26
26
uint32_t cpu_ppc_load_tbu(CPUPPCState *env);
27
/*
27
void cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value);
28
* This is not a probe, so only valid return is success; failure
28
@@ -XXX,XX +XXX,XX @@ int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
29
* should result in exception + longjmp to the cpu loop.
29
hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
30
*/
30
ppcmas_tlb_t *tlb);
31
- ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
31
#endif
32
- access_type, mmu_idx, false, retaddr);
32
-#endif
33
+ ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
33
34
+ access_type, mmu_idx, false, retaddr);
34
void ppc_store_fpscr(CPUPPCState *env, target_ulong val);
35
assert(ok);
35
void helper_hfscr_facility_check(CPUPPCState *env, uint32_t bit,
36
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/sparc/cpu.h
39
+++ b/target/sparc/cpu.h
40
@@ -XXX,XX +XXX,XX @@ G_NORETURN void sparc_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
41
uintptr_t retaddr);
42
G_NORETURN void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t);
43
44
-#ifndef NO_CPU_IO_DEFS
45
/* cpu_init.c */
46
void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu);
47
void sparc_cpu_list(void);
48
@@ -XXX,XX +XXX,XX @@ static inline int tlb_compare_context(const SparcTLBEntry *tlb,
49
return compare_masked(context, tlb->tag, MMU_CONTEXT_MASK);
36
}
50
}
37
51
38
@@ -XXX,XX +XXX,XX @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
52
-#endif
39
MMUAccessType access_type,
53
#endif
40
int mmu_idx, uintptr_t retaddr)
54
41
{
55
/* cpu-exec.c */
42
- CPUClass *cc = CPU_GET_CLASS(cpu);
56
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
57
index XXXXXXX..XXXXXXX 100644
58
--- a/accel/tcg/translate-all.c
59
+++ b/accel/tcg/translate-all.c
60
@@ -XXX,XX +XXX,XX @@
61
62
#include "qemu/osdep.h"
63
64
-#define NO_CPU_IO_DEFS
65
#include "trace.h"
66
#include "disas/disas.h"
67
#include "exec/exec-all.h"
68
diff --git a/tcg/tcg.c b/tcg/tcg.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/tcg/tcg.c
71
+++ b/tcg/tcg.c
72
@@ -XXX,XX +XXX,XX @@
73
#include "qemu/cacheflush.h"
74
#include "qemu/cacheinfo.h"
75
#include "qemu/timer.h"
43
-
76
-
44
- cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
77
-/* Note: the long term plan is to reduce the dependencies on the QEMU
45
+ cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
78
- CPU definitions. Currently they are used for qemu_ld/st
46
+ mmu_idx, retaddr);
79
- instructions */
47
}
80
-#define NO_CPU_IO_DEFS
48
81
-
49
static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
82
#include "exec/exec-all.h"
50
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
83
#include "exec/tlb-common.h"
51
if (!tlb_hit_page(tlb_addr, page_addr)) {
84
#include "tcg/tcg-op-common.h"
52
if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
53
CPUState *cs = env_cpu(env);
54
- CPUClass *cc = CPU_GET_CLASS(cs);
55
56
- if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
57
- mmu_idx, nonfault, retaddr)) {
58
+ if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
59
+ mmu_idx, nonfault, retaddr)) {
60
/* Non-faulting page table read failed. */
61
*phost = NULL;
62
return TLB_INVALID_MASK;
63
--
85
--
64
2.34.1
86
2.34.1
65
87
66
88
diff view generated by jsdifflib
1
Now that we have collected all of the page data into
1
This is a step toward making TranslationBlock agnostic
2
CPUTLBEntryFull, provide an interface to record that
2
to the address size of the guest.
3
all in one go, instead of using 4 arguments. This interface
4
allows CPUTLBEntryFull to be extended without having to
5
change the number of arguments.
6
3
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
6
---
12
include/exec/cpu-defs.h | 14 +++++++++++
7
include/exec/exec-all.h | 4 ++--
13
include/exec/exec-all.h | 22 ++++++++++++++++++
8
1 file changed, 2 insertions(+), 2 deletions(-)
14
accel/tcg/cputlb.c | 51 ++++++++++++++++++++++++++---------------
15
3 files changed, 69 insertions(+), 18 deletions(-)
16
9
17
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/include/exec/cpu-defs.h
20
+++ b/include/exec/cpu-defs.h
21
@@ -XXX,XX +XXX,XX @@ typedef struct CPUTLBEntryFull {
22
* + the offset within the target MemoryRegion (otherwise)
23
*/
24
hwaddr xlat_section;
25
+
26
+ /*
27
+ * @phys_addr contains the physical address in the address space
28
+ * given by cpu_asidx_from_attrs(cpu, @attrs).
29
+ */
30
+ hwaddr phys_addr;
31
+
32
+ /* @attrs contains the memory transaction attributes for the page. */
33
MemTxAttrs attrs;
34
+
35
+ /* @prot contains the complete protections for the page. */
36
+ uint8_t prot;
37
+
38
+ /* @lg_page_size contains the log2 of the page size. */
39
+ uint8_t lg_page_size;
40
} CPUTLBEntryFull;
41
42
/*
43
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
10
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
44
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
45
--- a/include/exec/exec-all.h
12
--- a/include/exec/exec-all.h
46
+++ b/include/exec/exec-all.h
13
+++ b/include/exec/exec-all.h
47
@@ -XXX,XX +XXX,XX @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
14
@@ -XXX,XX +XXX,XX @@
48
uint16_t idxmap,
15
addresses in userspace mode. Define tb_page_addr_t to be an appropriate
49
unsigned bits);
16
type. */
50
17
#if defined(CONFIG_USER_ONLY)
51
+/**
18
-typedef abi_ulong tb_page_addr_t;
52
+ * tlb_set_page_full:
19
-#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
53
+ * @cpu: CPU context
20
+typedef vaddr tb_page_addr_t;
54
+ * @mmu_idx: mmu index of the tlb to modify
21
+#define TB_PAGE_ADDR_FMT "%" VADDR_PRIx
55
+ * @vaddr: virtual address of the entry to add
22
#else
56
+ * @full: the details of the tlb entry
23
typedef ram_addr_t tb_page_addr_t;
57
+ *
24
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
58
+ * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
59
+ * @full must be filled, except for xlat_section, and constitute
60
+ * the complete description of the translated page.
61
+ *
62
+ * This is generally called by the target tlb_fill function after
63
+ * having performed a successful page table walk to find the physical
64
+ * address and attributes for the translation.
65
+ *
66
+ * At most one entry for a given virtual address is permitted. Only a
67
+ * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
68
+ * used by tlb_flush_page.
69
+ */
70
+void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr,
71
+ CPUTLBEntryFull *full);
72
+
73
/**
74
* tlb_set_page_with_attrs:
75
* @cpu: CPU to add this TLB entry for
76
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
77
index XXXXXXX..XXXXXXX 100644
78
--- a/accel/tcg/cputlb.c
79
+++ b/accel/tcg/cputlb.c
80
@@ -XXX,XX +XXX,XX @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
81
env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
82
}
83
84
-/* Add a new TLB entry. At most one entry for a given virtual address
85
+/*
86
+ * Add a new TLB entry. At most one entry for a given virtual address
87
* is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
88
* supplied size is only used by tlb_flush_page.
89
*
90
* Called from TCG-generated code, which is under an RCU read-side
91
* critical section.
92
*/
93
-void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
94
- hwaddr paddr, MemTxAttrs attrs, int prot,
95
- int mmu_idx, target_ulong size)
96
+void tlb_set_page_full(CPUState *cpu, int mmu_idx,
97
+ target_ulong vaddr, CPUTLBEntryFull *full)
98
{
99
CPUArchState *env = cpu->env_ptr;
100
CPUTLB *tlb = env_tlb(env);
101
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
102
CPUTLBEntry *te, tn;
103
hwaddr iotlb, xlat, sz, paddr_page;
104
target_ulong vaddr_page;
105
- int asidx = cpu_asidx_from_attrs(cpu, attrs);
106
- int wp_flags;
107
+ int asidx, wp_flags, prot;
108
bool is_ram, is_romd;
109
110
assert_cpu_is_self(cpu);
111
112
- if (size <= TARGET_PAGE_SIZE) {
113
+ if (full->lg_page_size <= TARGET_PAGE_BITS) {
114
sz = TARGET_PAGE_SIZE;
115
} else {
116
- tlb_add_large_page(env, mmu_idx, vaddr, size);
117
- sz = size;
118
+ sz = (hwaddr)1 << full->lg_page_size;
119
+ tlb_add_large_page(env, mmu_idx, vaddr, sz);
120
}
121
vaddr_page = vaddr & TARGET_PAGE_MASK;
122
- paddr_page = paddr & TARGET_PAGE_MASK;
123
+ paddr_page = full->phys_addr & TARGET_PAGE_MASK;
124
125
+ prot = full->prot;
126
+ asidx = cpu_asidx_from_attrs(cpu, full->attrs);
127
section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
128
- &xlat, &sz, attrs, &prot);
129
+ &xlat, &sz, full->attrs, &prot);
130
assert(sz >= TARGET_PAGE_SIZE);
131
132
tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
133
" prot=%x idx=%d\n",
134
- vaddr, paddr, prot, mmu_idx);
135
+ vaddr, full->phys_addr, prot, mmu_idx);
136
137
address = vaddr_page;
138
- if (size < TARGET_PAGE_SIZE) {
139
+ if (full->lg_page_size < TARGET_PAGE_BITS) {
140
/* Repeat the MMU check and TLB fill on every access. */
141
address |= TLB_INVALID_MASK;
142
}
143
- if (attrs.byte_swap) {
144
+ if (full->attrs.byte_swap) {
145
address |= TLB_BSWAP;
146
}
147
148
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
149
* subtract here is that of the page base, and not the same as the
150
* vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
151
*/
152
+ desc->fulltlb[index] = *full;
153
desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
154
- desc->fulltlb[index].attrs = attrs;
155
+ desc->fulltlb[index].phys_addr = paddr_page;
156
+ desc->fulltlb[index].prot = prot;
157
158
/* Now calculate the new entry */
159
tn.addend = addend - vaddr_page;
160
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
161
qemu_spin_unlock(&tlb->c.lock);
162
}
163
164
-/* Add a new TLB entry, but without specifying the memory
165
- * transaction attributes to be used.
166
- */
167
+void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
168
+ hwaddr paddr, MemTxAttrs attrs, int prot,
169
+ int mmu_idx, target_ulong size)
170
+{
171
+ CPUTLBEntryFull full = {
172
+ .phys_addr = paddr,
173
+ .attrs = attrs,
174
+ .prot = prot,
175
+ .lg_page_size = ctz64(size)
176
+ };
177
+
178
+ assert(is_power_of_2(size));
179
+ tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
180
+}
181
+
182
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
183
hwaddr paddr, int prot,
184
int mmu_idx, target_ulong size)
185
--
25
--
186
2.34.1
26
2.34.1
187
27
188
28
diff view generated by jsdifflib
1
Let tb->page_addr[0] contain the address of the first byte of the
1
This makes TranslationBlock agnostic to the address size of the guest.
2
translated block, rather than the address of the page containing the
2
Use vaddr for pc, since that's always a virtual address.
3
start of the translated block. We need to recover this value anyway
3
Use uint64_t for cs_base, since usage varies between guests.
4
at various points, and it is easier to discard a page offset when it
5
is not needed, which happens naturally via the existing find_page shift.
6
4
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
7
---
10
accel/tcg/cpu-exec.c | 16 ++++++++--------
8
include/exec/exec-all.h | 4 ++--
11
accel/tcg/cputlb.c | 3 ++-
9
accel/tcg/cpu-exec.c | 2 +-
12
accel/tcg/translate-all.c | 9 +++++----
10
2 files changed, 3 insertions(+), 3 deletions(-)
13
3 files changed, 15 insertions(+), 13 deletions(-)
14
11
12
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/include/exec/exec-all.h
15
+++ b/include/exec/exec-all.h
16
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
17
* Unwind information is taken as offsets from the page, to be
18
* deposited into the "current" PC.
19
*/
20
- target_ulong pc;
21
+ vaddr pc;
22
23
/*
24
* Target-specific data associated with the TranslationBlock, e.g.:
25
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
26
* s390x: instruction data for EXECUTE,
27
* sparc: the next pc of the instruction queue (for delay slots).
28
*/
29
- target_ulong cs_base;
30
+ uint64_t cs_base;
31
32
uint32_t flags; /* flags defining in which context the code was generated */
33
uint32_t cflags; /* compile flags */
15
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
34
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
16
index XXXXXXX..XXXXXXX 100644
35
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/tcg/cpu-exec.c
36
--- a/accel/tcg/cpu-exec.c
18
+++ b/accel/tcg/cpu-exec.c
37
+++ b/accel/tcg/cpu-exec.c
19
@@ -XXX,XX +XXX,XX @@ struct tb_desc {
38
@@ -XXX,XX +XXX,XX @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu,
20
target_ulong pc;
21
target_ulong cs_base;
22
CPUArchState *env;
23
- tb_page_addr_t phys_page1;
24
+ tb_page_addr_t page_addr0;
25
uint32_t flags;
26
uint32_t cflags;
27
uint32_t trace_vcpu_dstate;
28
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
29
const struct tb_desc *desc = d;
30
31
if (tb->pc == desc->pc &&
32
- tb->page_addr[0] == desc->phys_page1 &&
33
+ tb->page_addr[0] == desc->page_addr0 &&
34
tb->cs_base == desc->cs_base &&
35
tb->flags == desc->flags &&
36
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
37
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
38
if (tb->page_addr[1] == -1) {
39
return true;
40
} else {
41
- tb_page_addr_t phys_page2;
42
- target_ulong virt_page2;
43
+ tb_page_addr_t phys_page1;
44
+ target_ulong virt_page1;
45
46
/*
47
* We know that the first page matched, and an otherwise valid TB
48
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
49
* is different for the new TB. Therefore any exception raised
50
* here by the faulting lookup is not premature.
51
*/
52
- virt_page2 = TARGET_PAGE_ALIGN(desc->pc);
53
- phys_page2 = get_page_addr_code(desc->env, virt_page2);
54
- if (tb->page_addr[1] == phys_page2) {
55
+ virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
56
+ phys_page1 = get_page_addr_code(desc->env, virt_page1);
57
+ if (tb->page_addr[1] == phys_page1) {
58
return true;
59
}
60
}
61
@@ -XXX,XX +XXX,XX @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
62
if (phys_pc == -1) {
63
return NULL;
64
}
65
- desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
66
+ desc.page_addr0 = phys_pc;
67
h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
68
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
69
}
70
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
71
index XXXXXXX..XXXXXXX 100644
72
--- a/accel/tcg/cputlb.c
73
+++ b/accel/tcg/cputlb.c
74
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
75
can be detected */
76
void tlb_protect_code(ram_addr_t ram_addr)
77
{
39
{
78
- cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
40
if (qemu_log_in_addr_range(pc)) {
79
+ cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
41
qemu_log_mask(CPU_LOG_EXEC,
80
+ TARGET_PAGE_SIZE,
42
- "Trace %d: %p [" TARGET_FMT_lx
81
DIRTY_MEMORY_CODE);
43
+ "Trace %d: %p [%08" PRIx64
82
}
44
"/" TARGET_FMT_lx "/%08x/%08x] %s\n",
83
45
cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
84
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
46
tb->flags, tb->cflags, lookup_symbol(pc));
85
index XXXXXXX..XXXXXXX 100644
86
--- a/accel/tcg/translate-all.c
87
+++ b/accel/tcg/translate-all.c
88
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
89
qemu_spin_unlock(&tb->jmp_lock);
90
91
/* remove the TB from the hash list */
92
- phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
93
+ phys_pc = tb->page_addr[0];
94
h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
95
tb->trace_vcpu_dstate);
96
if (!qht_remove(&tb_ctx.htable, tb, h)) {
97
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
98
* we can only insert TBs that are fully initialized.
99
*/
100
page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
101
- tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
102
+ tb_page_add(p, tb, 0, phys_pc);
103
if (p2) {
104
tb_page_add(p2, tb, 1, phys_page2);
105
} else {
106
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
107
if (n == 0) {
108
/* NOTE: tb_end may be after the end of the page, but
109
it is not a problem */
110
- tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
111
+ tb_start = tb->page_addr[0];
112
tb_end = tb_start + tb->size;
113
} else {
114
tb_start = tb->page_addr[1];
115
- tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
116
+ tb_end = tb_start + ((tb->page_addr[0] + tb->size)
117
+ & ~TARGET_PAGE_MASK);
118
}
119
if (!(tb_end <= start || tb_start >= end)) {
120
#ifdef TARGET_HAS_PRECISE_SMC
121
--
47
--
122
2.34.1
48
2.34.1
123
49
124
50
diff view generated by jsdifflib
1
Prepare for targets to be able to produce TBs that can
1
This is all that is required by tcg/ from exec-all.h.
2
run in more than one virtual context.
3
2
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
accel/tcg/internal.h | 4 +++
6
include/exec/exec-all.h | 132 +--------------------------
8
accel/tcg/tb-jmp-cache.h | 41 +++++++++++++++++++++++++
7
include/exec/translation-block.h | 149 +++++++++++++++++++++++++++++++
9
include/exec/cpu-defs.h | 3 ++
8
tcg/tcg-op-ldst.c | 2 +-
10
include/exec/exec-all.h | 32 ++++++++++++++++++--
9
3 files changed, 151 insertions(+), 132 deletions(-)
11
accel/tcg/cpu-exec.c | 16 ++++++----
10
create mode 100644 include/exec/translation-block.h
12
accel/tcg/translate-all.c | 64 ++++++++++++++++++++++++++-------------
13
6 files changed, 131 insertions(+), 29 deletions(-)
14
11
15
diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/tcg/internal.h
18
+++ b/accel/tcg/internal.h
19
@@ -XXX,XX +XXX,XX @@ void tb_htable_init(void);
20
/* Return the current PC from CPU, which may be cached in TB. */
21
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
22
{
23
+#if TARGET_TB_PCREL
24
+ return cpu->cc->get_pc(cpu);
25
+#else
26
return tb_pc(tb);
27
+#endif
28
}
29
30
#endif /* ACCEL_TCG_INTERNAL_H */
31
diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/accel/tcg/tb-jmp-cache.h
34
+++ b/accel/tcg/tb-jmp-cache.h
35
@@ -XXX,XX +XXX,XX @@
36
37
/*
38
* Accessed in parallel; all accesses to 'tb' must be atomic.
39
+ * For TARGET_TB_PCREL, accesses to 'pc' must be protected by
40
+ * a load_acquire/store_release to 'tb'.
41
*/
42
struct CPUJumpCache {
43
struct {
44
TranslationBlock *tb;
45
+#if TARGET_TB_PCREL
46
+ target_ulong pc;
47
+#endif
48
} array[TB_JMP_CACHE_SIZE];
49
};
50
51
+static inline TranslationBlock *
52
+tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash)
53
+{
54
+#if TARGET_TB_PCREL
55
+ /* Use acquire to ensure current load of pc from jc. */
56
+ return qatomic_load_acquire(&jc->array[hash].tb);
57
+#else
58
+ /* Use rcu_read to ensure current load of pc from *tb. */
59
+ return qatomic_rcu_read(&jc->array[hash].tb);
60
+#endif
61
+}
62
+
63
+static inline target_ulong
64
+tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
65
+{
66
+#if TARGET_TB_PCREL
67
+ return jc->array[hash].pc;
68
+#else
69
+ return tb_pc(tb);
70
+#endif
71
+}
72
+
73
+static inline void
74
+tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
75
+ TranslationBlock *tb, target_ulong pc)
76
+{
77
+#if TARGET_TB_PCREL
78
+ jc->array[hash].pc = pc;
79
+ /* Use store_release on tb to ensure pc is written first. */
80
+ qatomic_store_release(&jc->array[hash].tb, tb);
81
+#else
82
+ /* Use the pc value already stored in tb->pc. */
83
+ qatomic_set(&jc->array[hash].tb, tb);
84
+#endif
85
+}
86
+
87
#endif /* ACCEL_TCG_TB_JMP_CACHE_H */
88
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
89
index XXXXXXX..XXXXXXX 100644
90
--- a/include/exec/cpu-defs.h
91
+++ b/include/exec/cpu-defs.h
92
@@ -XXX,XX +XXX,XX @@
93
# error TARGET_PAGE_BITS must be defined in cpu-param.h
94
# endif
95
#endif
96
+#ifndef TARGET_TB_PCREL
97
+# define TARGET_TB_PCREL 0
98
+#endif
99
100
#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
101
102
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
12
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
103
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
104
--- a/include/exec/exec-all.h
14
--- a/include/exec/exec-all.h
105
+++ b/include/exec/exec-all.h
15
+++ b/include/exec/exec-all.h
106
@@ -XXX,XX +XXX,XX @@ struct tb_tc {
16
@@ -XXX,XX +XXX,XX @@
107
};
17
#ifdef CONFIG_TCG
108
18
#include "exec/cpu_ldst.h"
109
struct TranslationBlock {
19
#endif
110
- target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
20
-#include "qemu/interval-tree.h"
111
- target_ulong cs_base; /* CS base for this block */
21
+#include "exec/translation-block.h"
112
+#if !TARGET_TB_PCREL
22
#include "qemu/clang-tsa.h"
23
24
-/* Page tracking code uses ram addresses in system mode, and virtual
25
- addresses in userspace mode. Define tb_page_addr_t to be an appropriate
26
- type. */
27
-#if defined(CONFIG_USER_ONLY)
28
-typedef vaddr tb_page_addr_t;
29
-#define TB_PAGE_ADDR_FMT "%" VADDR_PRIx
30
-#else
31
-typedef ram_addr_t tb_page_addr_t;
32
-#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
33
-#endif
34
-
35
/**
36
* cpu_unwind_state_data:
37
* @cpu: the cpu context
38
@@ -XXX,XX +XXX,XX @@ int probe_access_full(CPUArchState *env, target_ulong addr, int size,
39
CPUTLBEntryFull **pfull, uintptr_t retaddr);
40
#endif
41
42
-#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
43
-
44
/* Estimated block size for TB allocation. */
45
/* ??? The following is based on a 2015 survey of x86_64 host output.
46
Better would seem to be some sort of dynamically sized TB array,
47
@@ -XXX,XX +XXX,XX @@ int probe_access_full(CPUArchState *env, target_ulong addr, int size,
48
#define CODE_GEN_AVG_BLOCK_SIZE 150
49
#endif
50
51
-/*
52
- * Translation Cache-related fields of a TB.
53
- * This struct exists just for convenience; we keep track of TB's in a binary
54
- * search tree, and the only fields needed to compare TB's in the tree are
55
- * @ptr and @size.
56
- * Note: the address of search data can be obtained by adding @size to @ptr.
57
- */
58
-struct tb_tc {
59
- const void *ptr; /* pointer to the translated code */
60
- size_t size;
61
-};
62
-
63
-struct TranslationBlock {
64
- /*
65
- * Guest PC corresponding to this block. This must be the true
66
- * virtual address. Therefore e.g. x86 stores EIP + CS_BASE, and
67
- * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
68
- * privilege, must store those bits elsewhere.
69
- *
70
- * If CF_PCREL, the opcodes for the TranslationBlock are written
71
- * such that the TB is associated only with the physical page and
72
- * may be run in any virtual address context. In this case, PC
73
- * must always be taken from ENV in a target-specific manner.
74
- * Unwind information is taken as offsets from the page, to be
75
- * deposited into the "current" PC.
76
- */
77
- vaddr pc;
78
-
79
- /*
80
- * Target-specific data associated with the TranslationBlock, e.g.:
81
- * x86: the original user, the Code Segment virtual base,
82
- * arm: an extension of tb->flags,
83
- * s390x: instruction data for EXECUTE,
84
- * sparc: the next pc of the instruction queue (for delay slots).
85
- */
86
- uint64_t cs_base;
87
-
88
- uint32_t flags; /* flags defining in which context the code was generated */
89
- uint32_t cflags; /* compile flags */
90
-
91
-/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
92
-#define CF_COUNT_MASK 0x000001ff
93
-#define CF_NO_GOTO_TB 0x00000200 /* Do not chain with goto_tb */
94
-#define CF_NO_GOTO_PTR 0x00000400 /* Do not chain with goto_ptr */
95
-#define CF_SINGLE_STEP 0x00000800 /* gdbstub single-step in effect */
96
-#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
97
-#define CF_MEMI_ONLY 0x00010000 /* Only instrument memory ops */
98
-#define CF_USE_ICOUNT 0x00020000
99
-#define CF_INVALID 0x00040000 /* TB is stale. Set with @jmp_lock held */
100
-#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
101
-#define CF_NOIRQ 0x00100000 /* Generate an uninterruptible TB */
102
-#define CF_PCREL 0x00200000 /* Opcodes in TB are PC-relative */
103
-#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
104
-#define CF_CLUSTER_SHIFT 24
105
-
106
- /*
107
- * Above fields used for comparing
108
- */
109
-
110
- /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
111
- uint16_t size;
112
- uint16_t icount;
113
-
114
- struct tb_tc tc;
115
-
116
- /*
117
- * Track tb_page_addr_t intervals that intersect this TB.
118
- * For user-only, the virtual addresses are always contiguous,
119
- * and we use a unified interval tree. For system, we use a
120
- * linked list headed in each PageDesc. Within the list, the lsb
121
- * of the previous pointer tells the index of page_next[], and the
122
- * list is protected by the PageDesc lock(s).
123
- */
124
-#ifdef CONFIG_USER_ONLY
125
- IntervalTreeNode itree;
126
-#else
127
- uintptr_t page_next[2];
128
- tb_page_addr_t page_addr[2];
129
-#endif
130
-
131
- /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
132
- QemuSpin jmp_lock;
133
-
134
- /* The following data are used to directly call another TB from
135
- * the code of this one. This can be done either by emitting direct or
136
- * indirect native jump instructions. These jumps are reset so that the TB
137
- * just continues its execution. The TB can be linked to another one by
138
- * setting one of the jump targets (or patching the jump instruction). Only
139
- * two of such jumps are supported.
140
- */
141
-#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
142
- uint16_t jmp_reset_offset[2]; /* offset of original jump target */
143
- uint16_t jmp_insn_offset[2]; /* offset of direct jump insn */
144
- uintptr_t jmp_target_addr[2]; /* target address */
145
-
146
- /*
147
- * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
148
- * Each TB can have two outgoing jumps, and therefore can participate
149
- * in two lists. The list entries are kept in jmp_list_next[2]. The least
150
- * significant bit (LSB) of the pointers in these lists is used to encode
151
- * which of the two list entries is to be used in the pointed TB.
152
- *
153
- * List traversals are protected by jmp_lock. The destination TB of each
154
- * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
155
- * can be acquired from any origin TB.
156
- *
157
- * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
158
- * being invalidated, so that no further outgoing jumps from it can be set.
159
- *
160
- * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
161
- * to a destination TB that has CF_INVALID set.
162
- */
163
- uintptr_t jmp_list_head;
164
- uintptr_t jmp_list_next[2];
165
- uintptr_t jmp_dest[2];
166
-};
167
-
168
/* Hide the qatomic_read to make code a little easier on the eyes */
169
static inline uint32_t tb_cflags(const TranslationBlock *tb)
170
{
171
diff --git a/include/exec/translation-block.h b/include/exec/translation-block.h
172
new file mode 100644
173
index XXXXXXX..XXXXXXX
174
--- /dev/null
175
+++ b/include/exec/translation-block.h
176
@@ -XXX,XX +XXX,XX @@
177
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
178
+/*
179
+ * Definition of TranslationBlock.
180
+ * Copyright (c) 2003 Fabrice Bellard
181
+ */
182
+
183
+#ifndef EXEC_TRANSLATION_BLOCK_H
184
+#define EXEC_TRANSLATION_BLOCK_H
185
+
186
+#include "qemu/atomic.h"
187
+#include "qemu/thread.h"
188
+#include "qemu/interval-tree.h"
189
+#include "exec/cpu-common.h"
190
+#include "exec/target_page.h"
191
+
192
+/*
193
+ * Page tracking code uses ram addresses in system mode, and virtual
194
+ * addresses in userspace mode. Define tb_page_addr_t to be an
195
+ * appropriate type.
196
+ */
197
+#if defined(CONFIG_USER_ONLY)
198
+typedef vaddr tb_page_addr_t;
199
+#define TB_PAGE_ADDR_FMT "%" VADDR_PRIx
200
+#else
201
+typedef ram_addr_t tb_page_addr_t;
202
+#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
203
+#endif
204
+
205
+/*
206
+ * Translation Cache-related fields of a TB.
207
+ * This struct exists just for convenience; we keep track of TB's in a binary
208
+ * search tree, and the only fields needed to compare TB's in the tree are
209
+ * @ptr and @size.
210
+ * Note: the address of search data can be obtained by adding @size to @ptr.
211
+ */
212
+struct tb_tc {
213
+ const void *ptr; /* pointer to the translated code */
214
+ size_t size;
215
+};
216
+
217
+struct TranslationBlock {
113
+ /*
218
+ /*
114
+ * Guest PC corresponding to this block. This must be the true
219
+ * Guest PC corresponding to this block. This must be the true
115
+ * virtual address. Therefore e.g. x86 stores EIP + CS_BASE, and
220
+ * virtual address. Therefore e.g. x86 stores EIP + CS_BASE, and
116
+ * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
221
+ * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
117
+ * privilege, must store those bits elsewhere.
222
+ * privilege, must store those bits elsewhere.
118
+ *
223
+ *
119
+ * If TARGET_TB_PCREL, the opcodes for the TranslationBlock are
224
+ * If CF_PCREL, the opcodes for the TranslationBlock are written
120
+ * written such that the TB is associated only with the physical
225
+ * such that the TB is associated only with the physical page and
121
+ * page and may be run in any virtual address context. In this case,
226
+ * may be run in any virtual address context. In this case, PC
122
+ * PC must always be taken from ENV in a target-specific manner.
227
+ * must always be taken from ENV in a target-specific manner.
123
+ * Unwind information is taken as offsets from the page, to be
228
+ * Unwind information is taken as offsets from the page, to be
124
+ * deposited into the "current" PC.
229
+ * deposited into the "current" PC.
125
+ */
230
+ */
126
+ target_ulong pc;
231
+ vaddr pc;
127
+#endif
128
+
232
+
129
+ /*
233
+ /*
130
+ * Target-specific data associated with the TranslationBlock, e.g.:
234
+ * Target-specific data associated with the TranslationBlock, e.g.:
131
+ * x86: the original user, the Code Segment virtual base,
235
+ * x86: the original user, the Code Segment virtual base,
132
+ * arm: an extension of tb->flags,
236
+ * arm: an extension of tb->flags,
133
+ * s390x: instruction data for EXECUTE,
237
+ * s390x: instruction data for EXECUTE,
134
+ * sparc: the next pc of the instruction queue (for delay slots).
238
+ * sparc: the next pc of the instruction queue (for delay slots).
135
+ */
239
+ */
136
+ target_ulong cs_base;
240
+ uint64_t cs_base;
137
+
241
+
138
uint32_t flags; /* flags defining in which context the code was generated */
242
+ uint32_t flags; /* flags defining in which context the code was generated */
139
uint32_t cflags; /* compile flags */
243
+ uint32_t cflags; /* compile flags */
140
244
+
141
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
245
+/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
142
/* Hide the read to avoid ifdefs for TARGET_TB_PCREL. */
246
+#define CF_COUNT_MASK 0x000001ff
143
static inline target_ulong tb_pc(const TranslationBlock *tb)
247
+#define CF_NO_GOTO_TB 0x00000200 /* Do not chain with goto_tb */
144
{
248
+#define CF_NO_GOTO_PTR 0x00000400 /* Do not chain with goto_ptr */
145
+#if TARGET_TB_PCREL
249
+#define CF_SINGLE_STEP 0x00000800 /* gdbstub single-step in effect */
146
+ qemu_build_not_reached();
250
+#define CF_LAST_IO 0x00008000 /* Last insn may be an IO access. */
251
+#define CF_MEMI_ONLY 0x00010000 /* Only instrument memory ops */
252
+#define CF_USE_ICOUNT 0x00020000
253
+#define CF_INVALID 0x00040000 /* TB is stale. Set with @jmp_lock held */
254
+#define CF_PARALLEL 0x00080000 /* Generate code for a parallel context */
255
+#define CF_NOIRQ 0x00100000 /* Generate an uninterruptible TB */
256
+#define CF_PCREL 0x00200000 /* Opcodes in TB are PC-relative */
257
+#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
258
+#define CF_CLUSTER_SHIFT 24
259
+
260
+ /*
261
+ * Above fields used for comparing
262
+ */
263
+
264
+ /* size of target code for this block (1 <= size <= TARGET_PAGE_SIZE) */
265
+ uint16_t size;
266
+ uint16_t icount;
267
+
268
+ struct tb_tc tc;
269
+
270
+ /*
271
+ * Track tb_page_addr_t intervals that intersect this TB.
272
+ * For user-only, the virtual addresses are always contiguous,
273
+ * and we use a unified interval tree. For system, we use a
274
+ * linked list headed in each PageDesc. Within the list, the lsb
275
+ * of the previous pointer tells the index of page_next[], and the
276
+ * list is protected by the PageDesc lock(s).
277
+ */
278
+#ifdef CONFIG_USER_ONLY
279
+ IntervalTreeNode itree;
147
+#else
280
+#else
148
return tb->pc;
281
+ uintptr_t page_next[2];
282
+ tb_page_addr_t page_addr[2];
149
+#endif
283
+#endif
150
}
284
+
151
285
+ /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
152
/* Hide the qatomic_read to make code a little easier on the eyes */
286
+ QemuSpin jmp_lock;
153
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
287
+
288
+ /* The following data are used to directly call another TB from
289
+ * the code of this one. This can be done either by emitting direct or
290
+ * indirect native jump instructions. These jumps are reset so that the TB
291
+ * just continues its execution. The TB can be linked to another one by
292
+ * setting one of the jump targets (or patching the jump instruction). Only
293
+ * two of such jumps are supported.
294
+ */
295
+#define TB_JMP_OFFSET_INVALID 0xffff /* indicates no jump generated */
296
+ uint16_t jmp_reset_offset[2]; /* offset of original jump target */
297
+ uint16_t jmp_insn_offset[2]; /* offset of direct jump insn */
298
+ uintptr_t jmp_target_addr[2]; /* target address */
299
+
300
+ /*
301
+ * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
302
+ * Each TB can have two outgoing jumps, and therefore can participate
303
+ * in two lists. The list entries are kept in jmp_list_next[2]. The least
304
+ * significant bit (LSB) of the pointers in these lists is used to encode
305
+ * which of the two list entries is to be used in the pointed TB.
306
+ *
307
+ * List traversals are protected by jmp_lock. The destination TB of each
308
+ * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
309
+ * can be acquired from any origin TB.
310
+ *
311
+ * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
312
+ * being invalidated, so that no further outgoing jumps from it can be set.
313
+ *
314
+ * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
315
+ * to a destination TB that has CF_INVALID set.
316
+ */
317
+ uintptr_t jmp_list_head;
318
+ uintptr_t jmp_list_next[2];
319
+ uintptr_t jmp_dest[2];
320
+};
321
+
322
+/* The alignment given to TranslationBlock during allocation. */
323
+#define CODE_GEN_ALIGN 16
324
+
325
+#endif /* EXEC_TRANSLATION_BLOCK_H */
326
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
154
index XXXXXXX..XXXXXXX 100644
327
index XXXXXXX..XXXXXXX 100644
155
--- a/accel/tcg/cpu-exec.c
328
--- a/tcg/tcg-op-ldst.c
156
+++ b/accel/tcg/cpu-exec.c
329
+++ b/tcg/tcg-op-ldst.c
157
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
330
@@ -XXX,XX +XXX,XX @@
158
const TranslationBlock *tb = p;
159
const struct tb_desc *desc = d;
160
161
- if (tb_pc(tb) == desc->pc &&
162
+ if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
163
tb->page_addr[0] == desc->page_addr0 &&
164
tb->cs_base == desc->cs_base &&
165
tb->flags == desc->flags &&
166
@@ -XXX,XX +XXX,XX @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
167
return NULL;
168
}
169
desc.page_addr0 = phys_pc;
170
- h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
171
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc),
172
+ flags, cflags, *cpu->trace_dstate);
173
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
174
}
175
176
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
177
uint32_t flags, uint32_t cflags)
178
{
179
TranslationBlock *tb;
180
+ CPUJumpCache *jc;
181
uint32_t hash;
182
183
/* we should never be trying to look up an INVALID tb */
184
tcg_debug_assert(!(cflags & CF_INVALID));
185
186
hash = tb_jmp_cache_hash_func(pc);
187
- tb = qatomic_rcu_read(&cpu->tb_jmp_cache->array[hash].tb);
188
+ jc = cpu->tb_jmp_cache;
189
+ tb = tb_jmp_cache_get_tb(jc, hash);
190
191
if (likely(tb &&
192
- tb->pc == pc &&
193
+ tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
194
tb->cs_base == cs_base &&
195
tb->flags == flags &&
196
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
197
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
198
if (tb == NULL) {
199
return NULL;
200
}
201
- qatomic_set(&cpu->tb_jmp_cache->array[hash].tb, tb);
202
+ tb_jmp_cache_set(jc, hash, tb, pc);
203
return tb;
204
}
205
206
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
207
if (cc->tcg_ops->synchronize_from_tb) {
208
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
209
} else {
210
+ assert(!TARGET_TB_PCREL);
211
assert(cc->set_pc);
212
cc->set_pc(cpu, tb_pc(last_tb));
213
}
214
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
215
* for the fast lookup
216
*/
217
h = tb_jmp_cache_hash_func(pc);
218
- qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
219
+ tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
220
}
221
222
#ifndef CONFIG_USER_ONLY
223
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
224
index XXXXXXX..XXXXXXX 100644
225
--- a/accel/tcg/translate-all.c
226
+++ b/accel/tcg/translate-all.c
227
@@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
228
229
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
230
if (i == 0) {
231
- prev = (j == 0 ? tb_pc(tb) : 0);
232
+ prev = (!TARGET_TB_PCREL && j == 0 ? tb_pc(tb) : 0);
233
} else {
234
prev = tcg_ctx->gen_insn_data[i - 1][j];
235
}
236
@@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
237
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
238
uintptr_t searched_pc, bool reset_icount)
239
{
240
- target_ulong data[TARGET_INSN_START_WORDS] = { tb_pc(tb) };
241
+ target_ulong data[TARGET_INSN_START_WORDS];
242
uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
243
CPUArchState *env = cpu->env_ptr;
244
const uint8_t *p = tb->tc.ptr + tb->tc.size;
245
@@ -XXX,XX +XXX,XX @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
246
return -1;
247
}
248
249
+ memset(data, 0, sizeof(data));
250
+ if (!TARGET_TB_PCREL) {
251
+ data[0] = tb_pc(tb);
252
+ }
253
+
254
/* Reconstruct the stored insn data while looking for the point at
255
which the end of the insn exceeds the searched_pc. */
256
for (i = 0; i < num_insns; ++i) {
257
@@ -XXX,XX +XXX,XX @@ static bool tb_cmp(const void *ap, const void *bp)
258
const TranslationBlock *a = ap;
259
const TranslationBlock *b = bp;
260
261
- return tb_pc(a) == tb_pc(b) &&
262
- a->cs_base == b->cs_base &&
263
- a->flags == b->flags &&
264
- (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
265
- a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
266
- a->page_addr[0] == b->page_addr[0] &&
267
- a->page_addr[1] == b->page_addr[1];
268
+ return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
269
+ a->cs_base == b->cs_base &&
270
+ a->flags == b->flags &&
271
+ (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
272
+ a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
273
+ a->page_addr[0] == b->page_addr[0] &&
274
+ a->page_addr[1] == b->page_addr[1]);
275
}
276
277
void tb_htable_init(void)
278
@@ -XXX,XX +XXX,XX @@ static inline void tb_jmp_unlink(TranslationBlock *dest)
279
qemu_spin_unlock(&dest->jmp_lock);
280
}
281
282
+static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
283
+{
284
+ CPUState *cpu;
285
+
286
+ if (TARGET_TB_PCREL) {
287
+ /* A TB may be at any virtual address */
288
+ CPU_FOREACH(cpu) {
289
+ tcg_flush_jmp_cache(cpu);
290
+ }
291
+ } else {
292
+ uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));
293
+
294
+ CPU_FOREACH(cpu) {
295
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
296
+
297
+ if (qatomic_read(&jc->array[h].tb) == tb) {
298
+ qatomic_set(&jc->array[h].tb, NULL);
299
+ }
300
+ }
301
+ }
302
+}
303
+
304
/*
305
* In user-mode, call with mmap_lock held.
306
* In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
307
@@ -XXX,XX +XXX,XX @@ static inline void tb_jmp_unlink(TranslationBlock *dest)
308
*/
331
*/
309
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
332
310
{
333
#include "qemu/osdep.h"
311
- CPUState *cpu;
334
-#include "exec/exec-all.h"
312
PageDesc *p;
335
#include "tcg/tcg.h"
313
uint32_t h;
336
#include "tcg/tcg-temp-internal.h"
314
tb_page_addr_t phys_pc;
337
#include "tcg/tcg-op-common.h"
315
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
338
#include "tcg/tcg-mo.h"
316
339
+#include "exec/translation-block.h"
317
/* remove the TB from the hash list */
340
#include "exec/plugin-gen.h"
318
phys_pc = tb->page_addr[0];
341
#include "tcg-internal.h"
319
- h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, orig_cflags,
342
320
- tb->trace_vcpu_dstate);
321
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
322
+ tb->flags, orig_cflags, tb->trace_vcpu_dstate);
323
if (!qht_remove(&tb_ctx.htable, tb, h)) {
324
return;
325
}
326
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
327
}
328
329
/* remove the TB from the hash list */
330
- h = tb_jmp_cache_hash_func(tb->pc);
331
- CPU_FOREACH(cpu) {
332
- CPUJumpCache *jc = cpu->tb_jmp_cache;
333
- if (qatomic_read(&jc->array[h].tb) == tb) {
334
- qatomic_set(&jc->array[h].tb, NULL);
335
- }
336
- }
337
+ tb_jmp_cache_inval_tb(tb);
338
339
/* suppress this TB from the two jump lists */
340
tb_remove_from_jmp_list(tb, 0);
341
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
342
}
343
344
/* add in the hash table */
345
- h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, tb->cflags,
346
- tb->trace_vcpu_dstate);
347
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
348
+ tb->flags, tb->cflags, tb->trace_vcpu_dstate);
349
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
350
351
/* remove TB from the page(s) if we couldn't insert it */
352
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
353
354
gen_code_buf = tcg_ctx->code_gen_ptr;
355
tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
356
+#if !TARGET_TB_PCREL
357
tb->pc = pc;
358
+#endif
359
tb->cs_base = cs_base;
360
tb->flags = flags;
361
tb->cflags = cflags;
362
--
343
--
363
2.34.1
344
2.34.1
364
345
365
346
diff view generated by jsdifflib
1
Add an interface to return the CPUTLBEntryFull struct
1
The last use was removed with 2ac01d6dafab.
2
that goes with the lookup. The result is not intended
3
to be valid across multiple lookups, so the user must
4
use the results immediately.
5
2
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Fixes: 2ac01d6dafab ("translate-all: use a binary search tree to track TBs in TBContext")
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
6
---
11
include/exec/exec-all.h | 15 +++++++++++++
7
include/exec/exec-all.h | 10 ----------
12
include/qemu/typedefs.h | 1 +
8
1 file changed, 10 deletions(-)
13
accel/tcg/cputlb.c | 47 +++++++++++++++++++++++++----------------
14
3 files changed, 45 insertions(+), 18 deletions(-)
15
9
16
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
10
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
17
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
18
--- a/include/exec/exec-all.h
12
--- a/include/exec/exec-all.h
19
+++ b/include/exec/exec-all.h
13
+++ b/include/exec/exec-all.h
20
@@ -XXX,XX +XXX,XX @@ int probe_access_flags(CPUArchState *env, target_ulong addr,
14
@@ -XXX,XX +XXX,XX @@ int probe_access_full(CPUArchState *env, target_ulong addr, int size,
21
MMUAccessType access_type, int mmu_idx,
15
CPUTLBEntryFull **pfull, uintptr_t retaddr);
22
bool nonfault, void **phost, uintptr_t retaddr);
16
#endif
23
17
24
+#ifndef CONFIG_USER_ONLY
18
-/* Estimated block size for TB allocation. */
25
+/**
19
-/* ??? The following is based on a 2015 survey of x86_64 host output.
26
+ * probe_access_full:
20
- Better would seem to be some sort of dynamically sized TB array,
27
+ * Like probe_access_flags, except also return into @pfull.
21
- adapting to the block sizes actually being produced. */
28
+ *
22
-#if defined(CONFIG_SOFTMMU)
29
+ * The CPUTLBEntryFull structure returned via @pfull is transient
23
-#define CODE_GEN_AVG_BLOCK_SIZE 400
30
+ * and must be consumed or copied immediately, before any further
24
-#else
31
+ * access or changes to TLB @mmu_idx.
25
-#define CODE_GEN_AVG_BLOCK_SIZE 150
32
+ */
26
-#endif
33
+int probe_access_full(CPUArchState *env, target_ulong addr,
27
-
34
+ MMUAccessType access_type, int mmu_idx,
28
/* Hide the qatomic_read to make code a little easier on the eyes */
35
+ bool nonfault, void **phost,
29
static inline uint32_t tb_cflags(const TranslationBlock *tb)
36
+ CPUTLBEntryFull **pfull, uintptr_t retaddr);
37
+#endif
38
+
39
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
40
41
/* Estimated block size for TB allocation. */
42
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
43
index XXXXXXX..XXXXXXX 100644
44
--- a/include/qemu/typedefs.h
45
+++ b/include/qemu/typedefs.h
46
@@ -XXX,XX +XXX,XX @@ typedef struct ConfidentialGuestSupport ConfidentialGuestSupport;
47
typedef struct CPUAddressSpace CPUAddressSpace;
48
typedef struct CPUArchState CPUArchState;
49
typedef struct CPUState CPUState;
50
+typedef struct CPUTLBEntryFull CPUTLBEntryFull;
51
typedef struct DeviceListener DeviceListener;
52
typedef struct DeviceState DeviceState;
53
typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot;
54
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/accel/tcg/cputlb.c
57
+++ b/accel/tcg/cputlb.c
58
@@ -XXX,XX +XXX,XX @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
59
static int probe_access_internal(CPUArchState *env, target_ulong addr,
60
int fault_size, MMUAccessType access_type,
61
int mmu_idx, bool nonfault,
62
- void **phost, uintptr_t retaddr)
63
+ void **phost, CPUTLBEntryFull **pfull,
64
+ uintptr_t retaddr)
65
{
30
{
66
uintptr_t index = tlb_index(env, mmu_idx, addr);
67
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
68
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
69
mmu_idx, nonfault, retaddr)) {
70
/* Non-faulting page table read failed. */
71
*phost = NULL;
72
+ *pfull = NULL;
73
return TLB_INVALID_MASK;
74
}
75
76
/* TLB resize via tlb_fill may have moved the entry. */
77
+ index = tlb_index(env, mmu_idx, addr);
78
entry = tlb_entry(env, mmu_idx, addr);
79
80
/*
81
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
82
}
83
flags &= tlb_addr;
84
85
+ *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
86
+
87
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
88
if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
89
*phost = NULL;
90
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
91
return flags;
92
}
93
94
-int probe_access_flags(CPUArchState *env, target_ulong addr,
95
- MMUAccessType access_type, int mmu_idx,
96
- bool nonfault, void **phost, uintptr_t retaddr)
97
+int probe_access_full(CPUArchState *env, target_ulong addr,
98
+ MMUAccessType access_type, int mmu_idx,
99
+ bool nonfault, void **phost, CPUTLBEntryFull **pfull,
100
+ uintptr_t retaddr)
101
{
102
- int flags;
103
-
104
- flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
105
- nonfault, phost, retaddr);
106
+ int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
107
+ nonfault, phost, pfull, retaddr);
108
109
/* Handle clean RAM pages. */
110
if (unlikely(flags & TLB_NOTDIRTY)) {
111
- uintptr_t index = tlb_index(env, mmu_idx, addr);
112
- CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
113
-
114
- notdirty_write(env_cpu(env), addr, 1, full, retaddr);
115
+ notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
116
flags &= ~TLB_NOTDIRTY;
117
}
118
119
return flags;
120
}
121
122
+int probe_access_flags(CPUArchState *env, target_ulong addr,
123
+ MMUAccessType access_type, int mmu_idx,
124
+ bool nonfault, void **phost, uintptr_t retaddr)
125
+{
126
+ CPUTLBEntryFull *full;
127
+
128
+ return probe_access_full(env, addr, access_type, mmu_idx,
129
+ nonfault, phost, &full, retaddr);
130
+}
131
+
132
void *probe_access(CPUArchState *env, target_ulong addr, int size,
133
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
134
{
135
+ CPUTLBEntryFull *full;
136
void *host;
137
int flags;
138
139
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
140
141
flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
142
- false, &host, retaddr);
143
+ false, &host, &full, retaddr);
144
145
/* Per the interface, size == 0 merely faults the access. */
146
if (size == 0) {
147
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
148
}
149
150
if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
151
- uintptr_t index = tlb_index(env, mmu_idx, addr);
152
- CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
153
-
154
/* Handle watchpoints. */
155
if (flags & TLB_WATCHPOINT) {
156
int wp_access = (access_type == MMU_DATA_STORE
157
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
158
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
159
MMUAccessType access_type, int mmu_idx)
160
{
161
+ CPUTLBEntryFull *full;
162
void *host;
163
int flags;
164
165
flags = probe_access_internal(env, addr, 0, access_type,
166
- mmu_idx, true, &host, 0);
167
+ mmu_idx, true, &host, &full, 0);
168
169
/* No combination of flags are expected by the caller. */
170
return flags ? NULL : host;
171
@@ -XXX,XX +XXX,XX @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
172
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
173
void **hostp)
174
{
175
+ CPUTLBEntryFull *full;
176
void *p;
177
178
(void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
179
- cpu_mmu_index(env, true), false, &p, 0);
180
+ cpu_mmu_index(env, true), false, &p, &full, 0);
181
if (p == NULL) {
182
return -1;
183
}
184
--
31
--
185
2.34.1
32
2.34.1
186
33
187
34
diff view generated by jsdifflib
New patch
1
1
The only usage of gen_tb_start and gen_tb_end are here.
2
Move the static icount_start_insn variable into a local
3
within translator_loop. Simplify the two subroutines
4
by passing in the existing local cflags variable.
5
6
Leave only the declaration of gen_io_start in gen-icount.h.
7
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
11
include/exec/gen-icount.h | 79 +------------------------------------
12
accel/tcg/translator.c | 83 ++++++++++++++++++++++++++++++++++++++-
13
2 files changed, 82 insertions(+), 80 deletions(-)
14
15
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/include/exec/gen-icount.h
18
+++ b/include/exec/gen-icount.h
19
@@ -XXX,XX +XXX,XX @@
20
#ifndef GEN_ICOUNT_H
21
#define GEN_ICOUNT_H
22
23
-#include "exec/exec-all.h"
24
-
25
-/* Helpers for instruction counting code generation. */
26
-
27
-static TCGOp *icount_start_insn;
28
-
29
-static inline void gen_io_start(void)
30
-{
31
- tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
32
- offsetof(ArchCPU, parent_obj.can_do_io) -
33
- offsetof(ArchCPU, env));
34
-}
35
-
36
-static inline void gen_tb_start(const TranslationBlock *tb)
37
-{
38
- TCGv_i32 count = tcg_temp_new_i32();
39
-
40
- tcg_gen_ld_i32(count, cpu_env,
41
- offsetof(ArchCPU, neg.icount_decr.u32) -
42
- offsetof(ArchCPU, env));
43
-
44
- if (tb_cflags(tb) & CF_USE_ICOUNT) {
45
- /*
46
- * We emit a sub with a dummy immediate argument. Keep the insn index
47
- * of the sub so that we later (when we know the actual insn count)
48
- * can update the argument with the actual insn count.
49
- */
50
- tcg_gen_sub_i32(count, count, tcg_constant_i32(0));
51
- icount_start_insn = tcg_last_op();
52
- }
53
-
54
- /*
55
- * Emit the check against icount_decr.u32 to see if we should exit
56
- * unless we suppress the check with CF_NOIRQ. If we are using
57
- * icount and have suppressed interruption the higher level code
58
- * should have ensured we don't run more instructions than the
59
- * budget.
60
- */
61
- if (tb_cflags(tb) & CF_NOIRQ) {
62
- tcg_ctx->exitreq_label = NULL;
63
- } else {
64
- tcg_ctx->exitreq_label = gen_new_label();
65
- tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
66
- }
67
-
68
- if (tb_cflags(tb) & CF_USE_ICOUNT) {
69
- tcg_gen_st16_i32(count, cpu_env,
70
- offsetof(ArchCPU, neg.icount_decr.u16.low) -
71
- offsetof(ArchCPU, env));
72
- /*
73
- * cpu->can_do_io is cleared automatically here at the beginning of
74
- * each translation block. The cost is minimal and only paid for
75
- * -icount, plus it would be very easy to forget doing it in the
76
- * translator. Doing it here means we don't need a gen_io_end() to
77
- * go with gen_io_start().
78
- */
79
- tcg_gen_st_i32(tcg_constant_i32(0), cpu_env,
80
- offsetof(ArchCPU, parent_obj.can_do_io) -
81
- offsetof(ArchCPU, env));
82
- }
83
-}
84
-
85
-static inline void gen_tb_end(const TranslationBlock *tb, int num_insns)
86
-{
87
- if (tb_cflags(tb) & CF_USE_ICOUNT) {
88
- /*
89
- * Update the num_insn immediate parameter now that we know
90
- * the actual insn count.
91
- */
92
- tcg_set_insn_param(icount_start_insn, 2,
93
- tcgv_i32_arg(tcg_constant_i32(num_insns)));
94
- }
95
-
96
- if (tcg_ctx->exitreq_label) {
97
- gen_set_label(tcg_ctx->exitreq_label);
98
- tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);
99
- }
100
-}
101
+void gen_io_start(void);
102
103
#endif
104
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
105
index XXXXXXX..XXXXXXX 100644
106
--- a/accel/tcg/translator.c
107
+++ b/accel/tcg/translator.c
108
@@ -XXX,XX +XXX,XX @@
109
#include "exec/plugin-gen.h"
110
#include "exec/replay-core.h"
111
112
+
113
+void gen_io_start(void)
114
+{
115
+ tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
116
+ offsetof(ArchCPU, parent_obj.can_do_io) -
117
+ offsetof(ArchCPU, env));
118
+}
119
+
120
+static TCGOp *gen_tb_start(uint32_t cflags)
121
+{
122
+ TCGv_i32 count = tcg_temp_new_i32();
123
+ TCGOp *icount_start_insn = NULL;
124
+
125
+ tcg_gen_ld_i32(count, cpu_env,
126
+ offsetof(ArchCPU, neg.icount_decr.u32) -
127
+ offsetof(ArchCPU, env));
128
+
129
+ if (cflags & CF_USE_ICOUNT) {
130
+ /*
131
+ * We emit a sub with a dummy immediate argument. Keep the insn index
132
+ * of the sub so that we later (when we know the actual insn count)
133
+ * can update the argument with the actual insn count.
134
+ */
135
+ tcg_gen_sub_i32(count, count, tcg_constant_i32(0));
136
+ icount_start_insn = tcg_last_op();
137
+ }
138
+
139
+ /*
140
+ * Emit the check against icount_decr.u32 to see if we should exit
141
+ * unless we suppress the check with CF_NOIRQ. If we are using
142
+ * icount and have suppressed interruption the higher level code
143
+ * should have ensured we don't run more instructions than the
144
+ * budget.
145
+ */
146
+ if (cflags & CF_NOIRQ) {
147
+ tcg_ctx->exitreq_label = NULL;
148
+ } else {
149
+ tcg_ctx->exitreq_label = gen_new_label();
150
+ tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
151
+ }
152
+
153
+ if (cflags & CF_USE_ICOUNT) {
154
+ tcg_gen_st16_i32(count, cpu_env,
155
+ offsetof(ArchCPU, neg.icount_decr.u16.low) -
156
+ offsetof(ArchCPU, env));
157
+ /*
158
+ * cpu->can_do_io is cleared automatically here at the beginning of
159
+ * each translation block. The cost is minimal and only paid for
160
+ * -icount, plus it would be very easy to forget doing it in the
161
+ * translator. Doing it here means we don't need a gen_io_end() to
162
+ * go with gen_io_start().
163
+ */
164
+ tcg_gen_st_i32(tcg_constant_i32(0), cpu_env,
165
+ offsetof(ArchCPU, parent_obj.can_do_io) -
166
+ offsetof(ArchCPU, env));
167
+ }
168
+
169
+ return icount_start_insn;
170
+}
171
+
172
+static void gen_tb_end(const TranslationBlock *tb, uint32_t cflags,
173
+ TCGOp *icount_start_insn, int num_insns)
174
+{
175
+ if (cflags & CF_USE_ICOUNT) {
176
+ /*
177
+ * Update the num_insn immediate parameter now that we know
178
+ * the actual insn count.
179
+ */
180
+ tcg_set_insn_param(icount_start_insn, 2,
181
+ tcgv_i32_arg(tcg_constant_i32(num_insns)));
182
+ }
183
+
184
+ if (tcg_ctx->exitreq_label) {
185
+ gen_set_label(tcg_ctx->exitreq_label);
186
+ tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);
187
+ }
188
+}
189
+
190
bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
191
{
192
/* Suppress goto_tb if requested. */
193
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
194
const TranslatorOps *ops, DisasContextBase *db)
195
{
196
uint32_t cflags = tb_cflags(tb);
197
+ TCGOp *icount_start_insn;
198
bool plugin_enabled;
199
200
/* Initialize DisasContext */
201
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
202
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
203
204
/* Start translating. */
205
- gen_tb_start(db->tb);
206
+ icount_start_insn = gen_tb_start(cflags);
207
ops->tb_start(db, cpu);
208
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
209
210
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
211
212
/* Emit code to exit the TB, as indicated by db->is_jmp. */
213
ops->tb_stop(db, cpu);
214
- gen_tb_end(db->tb, db->num_insns);
215
+ gen_tb_end(tb, cflags, icount_start_insn, db->num_insns);
216
217
if (plugin_enabled) {
218
plugin_gen_tb_end(cpu);
219
--
220
2.34.1
221
222
diff view generated by jsdifflib
1
This bitmap is created and discarded immediately.
1
New wrapper around gen_io_start which takes care of the USE_ICOUNT
2
We gain nothing by its existence.
2
check, as well as marking the DisasContext to end the TB.
3
Remove exec/gen-icount.h.
3
4
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-Id: <20220822232338.1727934-2-richard.henderson@linaro.org>
7
---
7
---
8
accel/tcg/translate-all.c | 78 ++-------------------------------------
8
MAINTAINERS | 1 -
9
1 file changed, 4 insertions(+), 74 deletions(-)
9
include/exec/gen-icount.h | 6 --
10
include/exec/translator.h | 10 +++
11
target/arm/cpregs.h | 4 +-
12
accel/tcg/translator.c | 27 ++++++-
13
target/alpha/translate.c | 15 +---
14
target/arm/tcg/translate-a64.c | 23 +++---
15
target/arm/tcg/translate-mve.c | 1 -
16
target/arm/tcg/translate-neon.c | 1 -
17
target/arm/tcg/translate-vfp.c | 4 +-
18
target/arm/tcg/translate.c | 20 ++---
19
target/avr/translate.c | 1 -
20
target/cris/translate.c | 2 -
21
target/hppa/translate.c | 5 +-
22
target/i386/tcg/translate.c | 52 +++----------
23
target/loongarch/translate.c | 2 -
24
target/m68k/translate.c | 2 -
25
target/microblaze/translate.c | 2 -
26
target/mips/tcg/translate.c | 29 +++----
27
target/nios2/translate.c | 1 -
28
target/openrisc/translate.c | 9 +--
29
target/ppc/translate.c | 13 +---
30
target/riscv/translate.c | 2 -
31
target/rx/translate.c | 2 -
32
target/s390x/tcg/translate.c | 6 +-
33
target/sh4/translate.c | 2 -
34
target/sparc/translate.c | 75 +++++--------------
35
target/tricore/translate.c | 2 -
36
target/xtensa/translate.c | 27 ++-----
37
target/loongarch/insn_trans/trans_extra.c.inc | 4 +-
38
.../insn_trans/trans_privileged.c.inc | 4 +-
39
.../riscv/insn_trans/trans_privileged.c.inc | 8 +-
40
target/riscv/insn_trans/trans_rvi.c.inc | 24 ++----
41
33 files changed, 117 insertions(+), 269 deletions(-)
42
delete mode 100644 include/exec/gen-icount.h
10
43
11
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
44
diff --git a/MAINTAINERS b/MAINTAINERS
12
index XXXXXXX..XXXXXXX 100644
45
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/translate-all.c
46
--- a/MAINTAINERS
14
+++ b/accel/tcg/translate-all.c
47
+++ b/MAINTAINERS
48
@@ -XXX,XX +XXX,XX @@ F: ui/cocoa.m
49
Main loop
50
M: Paolo Bonzini <pbonzini@redhat.com>
51
S: Maintained
52
-F: include/exec/gen-icount.h
53
F: include/qemu/main-loop.h
54
F: include/sysemu/runstate.h
55
F: include/sysemu/runstate-action.h
56
diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
57
deleted file mode 100644
58
index XXXXXXX..XXXXXXX
59
--- a/include/exec/gen-icount.h
60
+++ /dev/null
15
@@ -XXX,XX +XXX,XX @@
61
@@ -XXX,XX +XXX,XX @@
16
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
62
-#ifndef GEN_ICOUNT_H
63
-#define GEN_ICOUNT_H
64
-
65
-void gen_io_start(void);
66
-
67
-#endif
68
diff --git a/include/exec/translator.h b/include/exec/translator.h
69
index XXXXXXX..XXXXXXX 100644
70
--- a/include/exec/translator.h
71
+++ b/include/exec/translator.h
72
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
73
*/
74
bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
75
76
+/**
77
+ * translator_io_start
78
+ * @db: Disassembly context
79
+ *
80
+ * If icount is enabled, set cpu->can_to_io, adjust db->is_jmp to
81
+ * DISAS_TOO_MANY if it is still DISAS_NEXT, and return true.
82
+ * Otherwise return false.
83
+ */
84
+bool translator_io_start(DisasContextBase *db);
85
+
86
/*
87
* Translator Load Functions
88
*
89
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
90
index XXXXXXX..XXXXXXX 100644
91
--- a/target/arm/cpregs.h
92
+++ b/target/arm/cpregs.h
93
@@ -XXX,XX +XXX,XX @@ enum {
94
ARM_CP_ALIAS = 1 << 8,
95
/*
96
* Flag: Register does I/O and therefore its accesses need to be marked
97
- * with gen_io_start() and also end the TB. In particular, registers which
98
- * implement clocks or timers require this.
99
+ * with translator_io_start() and also end the TB. In particular,
100
+ * registers which implement clocks or timers require this.
101
*/
102
ARM_CP_IO = 1 << 9,
103
/*
104
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
105
index XXXXXXX..XXXXXXX 100644
106
--- a/accel/tcg/translator.c
107
+++ b/accel/tcg/translator.c
108
@@ -XXX,XX +XXX,XX @@
109
#include "tcg/tcg.h"
110
#include "tcg/tcg-op.h"
111
#include "exec/exec-all.h"
112
-#include "exec/gen-icount.h"
113
#include "exec/log.h"
114
#include "exec/translator.h"
115
#include "exec/plugin-gen.h"
116
#include "exec/replay-core.h"
117
118
119
-void gen_io_start(void)
120
+static void gen_io_start(void)
121
{
122
tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
123
offsetof(ArchCPU, parent_obj.can_do_io) -
124
offsetof(ArchCPU, env));
125
}
126
127
+bool translator_io_start(DisasContextBase *db)
128
+{
129
+ uint32_t cflags = tb_cflags(db->tb);
130
+
131
+ if (!(cflags & CF_USE_ICOUNT)) {
132
+ return false;
133
+ }
134
+ if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
135
+ /* Already started in translator_loop. */
136
+ return true;
137
+ }
138
+
139
+ gen_io_start();
140
+
141
+ /*
142
+ * Ensure that this instruction will be the last in the TB.
143
+ * The target may override this to something more forceful.
144
+ */
145
+ if (db->is_jmp == DISAS_NEXT) {
146
+ db->is_jmp = DISAS_TOO_MANY;
147
+ }
148
+ return true;
149
+}
150
+
151
static TCGOp *gen_tb_start(uint32_t cflags)
152
{
153
TCGv_i32 count = tcg_temp_new_i32();
154
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
155
index XXXXXXX..XXXXXXX 100644
156
--- a/target/alpha/translate.c
157
+++ b/target/alpha/translate.c
158
@@ -XXX,XX +XXX,XX @@ static TCGv cpu_lock_value;
159
static TCGv cpu_pal_ir[31];
17
#endif
160
#endif
18
161
19
-#define SMC_BITMAP_USE_THRESHOLD 10
162
-#include "exec/gen-icount.h"
20
-
163
-
21
typedef struct PageDesc {
164
void alpha_translate_init(void)
22
/* list of TBs intersecting this ram page */
165
{
23
uintptr_t first_tb;
166
#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) }
24
-#ifdef CONFIG_SOFTMMU
167
@@ -XXX,XX +XXX,XX @@ static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
25
- /* in order to optimize self modifying code, we count the number
168
case 249: /* VMTIME */
26
- of lookups we do to a given page to use a bitmap */
169
helper = gen_helper_get_vmtime;
27
- unsigned long *code_bitmap;
170
do_helper:
28
- unsigned int code_write_count;
171
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
29
-#else
172
- gen_io_start();
30
+#ifdef CONFIG_USER_ONLY
173
+ if (translator_io_start(&ctx->base)) {
31
unsigned long flags;
174
helper(va);
32
void *target_data;
175
return DISAS_PC_STALE;
33
#endif
176
} else {
34
-#ifndef CONFIG_USER_ONLY
177
@@ -XXX,XX +XXX,XX @@ static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
35
+#ifdef CONFIG_SOFTMMU
178
36
QemuSpin lock;
179
case 251:
37
#endif
180
/* ALARM */
38
} PageDesc;
181
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
39
@@ -XXX,XX +XXX,XX @@ void tb_htable_init(void)
182
- gen_io_start();
40
qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
183
+ if (translator_io_start(&ctx->base)) {
41
}
184
ret = DISAS_PC_STALE;
42
43
-/* call with @p->lock held */
44
-static inline void invalidate_page_bitmap(PageDesc *p)
45
-{
46
- assert_page_locked(p);
47
-#ifdef CONFIG_SOFTMMU
48
- g_free(p->code_bitmap);
49
- p->code_bitmap = NULL;
50
- p->code_write_count = 0;
51
-#endif
52
-}
53
-
54
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
55
static void page_flush_tb_1(int level, void **lp)
56
{
57
@@ -XXX,XX +XXX,XX @@ static void page_flush_tb_1(int level, void **lp)
58
for (i = 0; i < V_L2_SIZE; ++i) {
59
page_lock(&pd[i]);
60
pd[i].first_tb = (uintptr_t)NULL;
61
- invalidate_page_bitmap(pd + i);
62
page_unlock(&pd[i]);
63
}
185
}
64
} else {
186
gen_helper_set_alarm(cpu_env, vb);
65
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
187
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
66
if (rm_from_page_list) {
188
case 0xC000:
67
p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
189
/* RPCC */
68
tb_page_remove(p, tb);
190
va = dest_gpr(ctx, ra);
69
- invalidate_page_bitmap(p);
191
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
70
if (tb->page_addr[1] != -1) {
192
- gen_io_start();
71
p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
193
- gen_helper_load_pcc(va, cpu_env);
72
tb_page_remove(p, tb);
194
+ if (translator_io_start(&ctx->base)) {
73
- invalidate_page_bitmap(p);
195
ret = DISAS_PC_STALE;
196
- } else {
197
- gen_helper_load_pcc(va, cpu_env);
198
}
199
+ gen_helper_load_pcc(va, cpu_env);
200
break;
201
case 0xE000:
202
/* RC */
203
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
204
index XXXXXXX..XXXXXXX 100644
205
--- a/target/arm/tcg/translate-a64.c
206
+++ b/target/arm/tcg/translate-a64.c
207
@@ -XXX,XX +XXX,XX @@
208
#include "internals.h"
209
#include "qemu/host-utils.h"
210
#include "semihosting/semihost.h"
211
-#include "exec/gen-icount.h"
212
#include "exec/log.h"
213
#include "cpregs.h"
214
#include "translate-a64.h"
215
@@ -XXX,XX +XXX,XX @@ static bool trans_ERET(DisasContext *s, arg_ERET *a)
216
tcg_gen_ld_i64(dst, cpu_env,
217
offsetof(CPUARMState, elr_el[s->current_el]));
218
219
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
220
- gen_io_start();
221
- }
222
+ translator_io_start(&s->base);
223
224
gen_helper_exception_return(cpu_env, dst);
225
/* Must exit loop to check un-masked IRQs */
226
@@ -XXX,XX +XXX,XX @@ static bool trans_ERETA(DisasContext *s, arg_reta *a)
227
offsetof(CPUARMState, elr_el[s->current_el]));
228
229
dst = auth_branch_target(s, dst, cpu_X[31], !a->m);
230
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
231
- gen_io_start();
232
- }
233
+
234
+ translator_io_start(&s->base);
235
236
gen_helper_exception_return(cpu_env, dst);
237
/* Must exit loop to check un-masked IRQs */
238
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
239
uint32_t key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
240
crn, crm, op0, op1, op2);
241
const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
242
+ bool need_exit_tb = false;
243
TCGv_ptr tcg_ri = NULL;
244
TCGv_i64 tcg_rt;
245
246
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
247
return;
248
}
249
250
- if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
251
- gen_io_start();
252
+ if (ri->type & ARM_CP_IO) {
253
+ /* I/O operations must end the TB here (whether read or write) */
254
+ need_exit_tb = translator_io_start(&s->base);
255
}
256
257
tcg_rt = cpu_reg(s, rt);
258
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
74
}
259
}
75
}
260
}
76
261
77
@@ -XXX,XX +XXX,XX @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
262
- if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
263
- /* I/O operations must end the TB here (whether read or write) */
264
- s->base.is_jmp = DISAS_UPDATE_EXIT;
265
- }
266
if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
267
/*
268
* A write to any coprocessor regiser that ends a TB
269
@@ -XXX,XX +XXX,XX @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
270
* but allow this to be suppressed by the register definition
271
* (usually only necessary to work around guest bugs).
272
*/
273
+ need_exit_tb = true;
274
+ }
275
+ if (need_exit_tb) {
276
s->base.is_jmp = DISAS_UPDATE_EXIT;
78
}
277
}
79
}
278
}
80
279
diff --git a/target/arm/tcg/translate-mve.c b/target/arm/tcg/translate-mve.c
81
-#ifdef CONFIG_SOFTMMU
280
index XXXXXXX..XXXXXXX 100644
82
-/* call with @p->lock held */
281
--- a/target/arm/tcg/translate-mve.c
83
-static void build_page_bitmap(PageDesc *p)
282
+++ b/target/arm/tcg/translate-mve.c
84
-{
283
@@ -XXX,XX +XXX,XX @@
85
- int n, tb_start, tb_end;
284
#include "tcg/tcg-op.h"
86
- TranslationBlock *tb;
285
#include "tcg/tcg-op-gvec.h"
87
-
286
#include "exec/exec-all.h"
88
- assert_page_locked(p);
287
-#include "exec/gen-icount.h"
89
- p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
288
#include "translate.h"
90
-
289
#include "translate-a32.h"
91
- PAGE_FOR_EACH_TB(p, tb, n) {
290
92
- /* NOTE: this is subtle as a TB may span two physical pages */
291
diff --git a/target/arm/tcg/translate-neon.c b/target/arm/tcg/translate-neon.c
93
- if (n == 0) {
292
index XXXXXXX..XXXXXXX 100644
94
- /* NOTE: tb_end may be after the end of the page, but
293
--- a/target/arm/tcg/translate-neon.c
95
- it is not a problem */
294
+++ b/target/arm/tcg/translate-neon.c
96
- tb_start = tb->pc & ~TARGET_PAGE_MASK;
295
@@ -XXX,XX +XXX,XX @@
97
- tb_end = tb_start + tb->size;
296
#include "tcg/tcg-op.h"
98
- if (tb_end > TARGET_PAGE_SIZE) {
297
#include "tcg/tcg-op-gvec.h"
99
- tb_end = TARGET_PAGE_SIZE;
298
#include "exec/exec-all.h"
100
- }
299
-#include "exec/gen-icount.h"
101
- } else {
300
#include "translate.h"
102
- tb_start = 0;
301
#include "translate-a32.h"
103
- tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
302
303
diff --git a/target/arm/tcg/translate-vfp.c b/target/arm/tcg/translate-vfp.c
304
index XXXXXXX..XXXXXXX 100644
305
--- a/target/arm/tcg/translate-vfp.c
306
+++ b/target/arm/tcg/translate-vfp.c
307
@@ -XXX,XX +XXX,XX @@
308
#include "tcg/tcg-op.h"
309
#include "tcg/tcg-op-gvec.h"
310
#include "exec/exec-all.h"
311
-#include "exec/gen-icount.h"
312
#include "translate.h"
313
#include "translate-a32.h"
314
315
@@ -XXX,XX +XXX,XX @@ static void gen_preserve_fp_state(DisasContext *s, bool skip_context_update)
316
* so we must mark it as an IO operation for icount (and cause
317
* this to be the last insn in the TB).
318
*/
319
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
320
+ if (translator_io_start(&s->base)) {
321
s->base.is_jmp = DISAS_UPDATE_EXIT;
322
- gen_io_start();
323
}
324
gen_helper_v7m_preserve_fp_state(cpu_env);
325
/*
326
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
327
index XXXXXXX..XXXXXXX 100644
328
--- a/target/arm/tcg/translate.c
329
+++ b/target/arm/tcg/translate.c
330
@@ -XXX,XX +XXX,XX @@
331
#include "cpregs.h"
332
#include "translate.h"
333
#include "translate-a32.h"
334
-#include "exec/gen-icount.h"
335
#include "exec/helper-proto.h"
336
337
#define HELPER_H "helper.h"
338
@@ -XXX,XX +XXX,XX @@ static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
339
* appropriately depending on the new Thumb bit, so it must
340
* be called after storing the new PC.
341
*/
342
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
343
- gen_io_start();
344
- }
345
+ translator_io_start(&s->base);
346
gen_helper_cpsr_write_eret(cpu_env, cpsr);
347
/* Must exit loop to check un-masked IRQs */
348
s->base.is_jmp = DISAS_EXIT;
349
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
350
uint32_t key = ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2);
351
const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key);
352
TCGv_ptr tcg_ri = NULL;
353
- bool need_exit_tb;
354
+ bool need_exit_tb = false;
355
uint32_t syndrome;
356
357
/*
358
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
359
g_assert_not_reached();
360
}
361
362
- if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
363
- gen_io_start();
364
+ if (ri->type & ARM_CP_IO) {
365
+ /* I/O operations must end the TB here (whether read or write) */
366
+ need_exit_tb = translator_io_start(&s->base);
367
}
368
369
if (isread) {
370
@@ -XXX,XX +XXX,XX @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
371
}
372
}
373
374
- /* I/O operations must end the TB here (whether read or write) */
375
- need_exit_tb = ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) &&
376
- (ri->type & ARM_CP_IO));
377
-
378
if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
379
/*
380
* A write to any coprocessor register that ends a TB
381
@@ -XXX,XX +XXX,XX @@ static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
382
if (exc_return) {
383
/* Restore CPSR from SPSR. */
384
tmp = load_cpu_field(spsr);
385
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
386
- gen_io_start();
104
- }
387
- }
105
- bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
388
+ translator_io_start(&s->base);
106
- }
389
gen_helper_cpsr_write_eret(cpu_env, tmp);
107
-}
390
/* Must exit loop to check un-masked IRQs */
108
-#endif
391
s->base.is_jmp = DISAS_EXIT;
109
-
392
diff --git a/target/avr/translate.c b/target/avr/translate.c
110
/* add the tb in the target page and protect it if necessary
393
index XXXXXXX..XXXXXXX 100644
111
*
394
--- a/target/avr/translate.c
112
* Called with mmap_lock held for user-mode emulation.
395
+++ b/target/avr/translate.c
113
@@ -XXX,XX +XXX,XX @@ static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
396
@@ -XXX,XX +XXX,XX @@
114
page_already_protected = p->first_tb != (uintptr_t)NULL;
397
#include "exec/helper-gen.h"
115
#endif
398
#include "exec/log.h"
116
p->first_tb = (uintptr_t)tb | n;
399
#include "exec/translator.h"
117
- invalidate_page_bitmap(p);
400
-#include "exec/gen-icount.h"
118
401
119
#if defined(CONFIG_USER_ONLY)
402
#define HELPER_H "helper.h"
120
/* translator_loop() must have made all TB pages non-writable */
403
#include "exec/helper-info.c.inc"
121
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
404
diff --git a/target/cris/translate.c b/target/cris/translate.c
122
/* remove TB from the page(s) if we couldn't insert it */
405
index XXXXXXX..XXXXXXX 100644
123
if (unlikely(existing_tb)) {
406
--- a/target/cris/translate.c
124
tb_page_remove(p, tb);
407
+++ b/target/cris/translate.c
125
- invalidate_page_bitmap(p);
408
@@ -XXX,XX +XXX,XX @@ static TCGv env_btaken;
126
if (p2) {
409
static TCGv env_btarget;
127
tb_page_remove(p2, tb);
410
static TCGv env_pc;
128
- invalidate_page_bitmap(p2);
411
412
-#include "exec/gen-icount.h"
413
-
414
/* This is the state at translation time. */
415
typedef struct DisasContext {
416
DisasContextBase base;
417
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
418
index XXXXXXX..XXXXXXX 100644
419
--- a/target/hppa/translate.c
420
+++ b/target/hppa/translate.c
421
@@ -XXX,XX +XXX,XX @@ static TCGv_reg cpu_psw_v;
422
static TCGv_reg cpu_psw_cb;
423
static TCGv_reg cpu_psw_cb_msb;
424
425
-#include "exec/gen-icount.h"
426
-
427
void hppa_translate_init(void)
428
{
429
#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
430
@@ -XXX,XX +XXX,XX @@ static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
431
/* FIXME: Respect PSW_S bit. */
432
nullify_over(ctx);
433
tmp = dest_gpr(ctx, rt);
434
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
435
- gen_io_start();
436
+ if (translator_io_start(&ctx->base)) {
437
gen_helper_read_interval_timer(tmp);
438
ctx->base.is_jmp = DISAS_IAQ_N_STALE;
439
} else {
440
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
441
index XXXXXXX..XXXXXXX 100644
442
--- a/target/i386/tcg/translate.c
443
+++ b/target/i386/tcg/translate.c
444
@@ -XXX,XX +XXX,XX @@ static TCGv cpu_seg_base[6];
445
static TCGv_i64 cpu_bndl[4];
446
static TCGv_i64 cpu_bndu[4];
447
448
-#include "exec/gen-icount.h"
449
-
450
typedef struct DisasContext {
451
DisasContextBase base;
452
453
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
454
!(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
455
goto illegal_op;
456
}
457
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
458
- gen_io_start();
459
- s->base.is_jmp = DISAS_TOO_MANY;
460
- }
461
+ translator_io_start(&s->base);
462
gen_helper_rdrand(s->T0, cpu_env);
463
rm = (modrm & 7) | REX_B(s);
464
gen_op_mov_reg_v(s, dflag, rm, s->T0);
465
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
466
SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
467
break;
129
}
468
}
130
tb = existing_tb;
469
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
470
- gen_io_start();
471
- s->base.is_jmp = DISAS_TOO_MANY;
472
- }
473
+ translator_io_start(&s->base);
474
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
475
gen_repz_ins(s, ot);
476
} else {
477
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
478
if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
479
break;
480
}
481
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
482
- gen_io_start();
483
- s->base.is_jmp = DISAS_TOO_MANY;
484
- }
485
+ translator_io_start(&s->base);
486
if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
487
gen_repz_outs(s, ot);
488
} else {
489
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
490
if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
491
break;
492
}
493
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
494
- gen_io_start();
495
- s->base.is_jmp = DISAS_TOO_MANY;
496
- }
497
+ translator_io_start(&s->base);
498
gen_helper_in_func(ot, s->T1, s->tmp2_i32);
499
gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
500
gen_bpt_io(s, s->tmp2_i32, ot);
501
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
502
if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
503
break;
504
}
505
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
506
- gen_io_start();
507
- s->base.is_jmp = DISAS_TOO_MANY;
508
- }
509
+ translator_io_start(&s->base);
510
gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
511
tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
512
gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
513
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
514
if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
515
break;
516
}
517
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
518
- gen_io_start();
519
- s->base.is_jmp = DISAS_TOO_MANY;
520
- }
521
+ translator_io_start(&s->base);
522
gen_helper_in_func(ot, s->T1, s->tmp2_i32);
523
gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
524
gen_bpt_io(s, s->tmp2_i32, ot);
525
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
526
if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
527
break;
528
}
529
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
530
- gen_io_start();
531
- s->base.is_jmp = DISAS_TOO_MANY;
532
- }
533
+ translator_io_start(&s->base);
534
gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
535
tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
536
gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
537
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
538
case 0x131: /* rdtsc */
539
gen_update_cc_op(s);
540
gen_update_eip_cur(s);
541
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
542
- gen_io_start();
543
- s->base.is_jmp = DISAS_TOO_MANY;
544
- }
545
+ translator_io_start(&s->base);
546
gen_helper_rdtsc(cpu_env);
547
break;
548
case 0x133: /* rdpmc */
549
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
550
}
551
gen_update_cc_op(s);
552
gen_update_eip_cur(s);
553
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
554
- gen_io_start();
555
- s->base.is_jmp = DISAS_TOO_MANY;
556
- }
557
+ translator_io_start(&s->base);
558
gen_helper_rdtscp(cpu_env);
559
break;
560
561
@@ -XXX,XX +XXX,XX @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
562
}
563
ot = (CODE64(s) ? MO_64 : MO_32);
564
565
- if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
566
- gen_io_start();
567
- s->base.is_jmp = DISAS_TOO_MANY;
568
- }
569
+ translator_io_start(&s->base);
570
if (b & 2) {
571
gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
572
gen_op_mov_v_reg(s, ot, s->T0, rm);
573
diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c
574
index XXXXXXX..XXXXXXX 100644
575
--- a/target/loongarch/translate.c
576
+++ b/target/loongarch/translate.c
577
@@ -XXX,XX +XXX,XX @@
578
TCGv cpu_gpr[32], cpu_pc;
579
static TCGv cpu_lladdr, cpu_llval;
580
581
-#include "exec/gen-icount.h"
582
-
583
#define HELPER_H "helper.h"
584
#include "exec/helper-info.c.inc"
585
#undef HELPER_H
586
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
587
index XXXXXXX..XXXXXXX 100644
588
--- a/target/m68k/translate.c
589
+++ b/target/m68k/translate.c
590
@@ -XXX,XX +XXX,XX @@ static TCGv NULL_QREG;
591
/* Used to distinguish stores from bad addressing modes. */
592
static TCGv store_dummy;
593
594
-#include "exec/gen-icount.h"
595
-
596
void m68k_tcg_init(void)
597
{
598
char *p;
599
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
600
index XXXXXXX..XXXXXXX 100644
601
--- a/target/microblaze/translate.c
602
+++ b/target/microblaze/translate.c
603
@@ -XXX,XX +XXX,XX @@ static TCGv_i32 cpu_iflags;
604
static TCGv cpu_res_addr;
605
static TCGv_i32 cpu_res_val;
606
607
-#include "exec/gen-icount.h"
608
-
609
/* This is the state at translation time. */
610
typedef struct DisasContext {
611
DisasContextBase base;
612
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
613
index XXXXXXX..XXXXXXX 100644
614
--- a/target/mips/tcg/translate.c
615
+++ b/target/mips/tcg/translate.c
616
@@ -XXX,XX +XXX,XX @@ static TCGv_i32 hflags;
617
TCGv_i32 fpu_fcr0, fpu_fcr31;
618
TCGv_i64 fpu_f64[32];
619
620
-#include "exec/gen-icount.h"
621
-
622
static const char regnames_HI[][4] = {
623
"HI0", "HI1", "HI2", "HI3",
624
};
625
@@ -XXX,XX +XXX,XX @@ static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
626
switch (sel) {
627
case CP0_REG09__COUNT:
628
/* Mark as an IO operation because we read the time. */
629
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
630
- gen_io_start();
631
- }
632
+ translator_io_start(&ctx->base);
633
+
634
gen_helper_mfc0_count(arg, cpu_env);
635
/*
636
* Break the TB to be able to take timer interrupts immediately
637
@@ -XXX,XX +XXX,XX @@ cp0_unimplemented:
638
static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
639
{
640
const char *register_name = "invalid";
641
+ bool icount;
642
643
if (sel != 0) {
644
check_insn(ctx, ISA_MIPS_R1);
131
}
645
}
132
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
646
647
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
648
- gen_io_start();
649
- }
650
+ icount = translator_io_start(&ctx->base);
651
652
switch (reg) {
653
case CP0_REGISTER_00:
654
@@ -XXX,XX +XXX,XX @@ static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
655
trace_mips_translate_c0("mtc0", register_name, reg, sel);
656
657
/* For simplicity assume that all writes can cause interrupts. */
658
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
659
+ if (icount) {
660
/*
661
* DISAS_STOP isn't sufficient, we need to ensure we break out of
662
* translated code to check for pending interrupts.
663
@@ -XXX,XX +XXX,XX @@ static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
664
switch (sel) {
665
case CP0_REG09__COUNT:
666
/* Mark as an IO operation because we read the time. */
667
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
668
- gen_io_start();
669
- }
670
+ translator_io_start(&ctx->base);
671
gen_helper_mfc0_count(arg, cpu_env);
672
/*
673
* Break the TB to be able to take timer interrupts immediately
674
@@ -XXX,XX +XXX,XX @@ cp0_unimplemented:
675
static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
676
{
677
const char *register_name = "invalid";
678
+ bool icount;
679
680
if (sel != 0) {
681
check_insn(ctx, ISA_MIPS_R1);
682
}
683
684
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
685
- gen_io_start();
686
- }
687
+ icount = translator_io_start(&ctx->base);
688
689
switch (reg) {
690
case CP0_REGISTER_00:
691
@@ -XXX,XX +XXX,XX @@ static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
692
trace_mips_translate_c0("dmtc0", register_name, reg, sel);
693
694
/* For simplicity assume that all writes can cause interrupts. */
695
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
696
+ if (icount) {
697
/*
698
* DISAS_STOP isn't sufficient, we need to ensure we break out of
699
* translated code to check for pending interrupts.
700
@@ -XXX,XX +XXX,XX @@ void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel)
701
gen_store_gpr(t0, rt);
702
break;
703
case 2:
704
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
705
- gen_io_start();
706
- }
707
+ translator_io_start(&ctx->base);
708
gen_helper_rdhwr_cc(t0, cpu_env);
709
gen_store_gpr(t0, rt);
710
/*
711
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
712
index XXXXXXX..XXXXXXX 100644
713
--- a/target/nios2/translate.c
714
+++ b/target/nios2/translate.c
715
@@ -XXX,XX +XXX,XX @@
716
#include "exec/cpu_ldst.h"
717
#include "exec/translator.h"
718
#include "qemu/qemu-print.h"
719
-#include "exec/gen-icount.h"
720
#include "semihosting/semihost.h"
721
722
#define HELPER_H "helper.h"
723
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
724
index XXXXXXX..XXXXXXX 100644
725
--- a/target/openrisc/translate.c
726
+++ b/target/openrisc/translate.c
727
@@ -XXX,XX +XXX,XX @@
728
729
#include "exec/helper-proto.h"
730
#include "exec/helper-gen.h"
731
-#include "exec/gen-icount.h"
732
733
#include "exec/log.h"
734
735
@@ -XXX,XX +XXX,XX @@ static bool trans_l_mfspr(DisasContext *dc, arg_l_mfspr *a)
736
737
check_r0_write(dc, a->d);
738
739
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
740
- gen_io_start();
741
+ if (translator_io_start(&dc->base)) {
742
if (dc->delayed_branch) {
743
tcg_gen_mov_tl(cpu_pc, jmp_pc);
744
tcg_gen_discard_tl(jmp_pc);
745
@@ -XXX,XX +XXX,XX @@ static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a)
746
{
747
TCGv spr = tcg_temp_new();
748
749
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
750
- gen_io_start();
751
- }
752
+ translator_io_start(&dc->base);
753
+
754
/*
755
* For SR, we will need to exit the TB to recognize the new
756
* exception state. For NPC, in theory this counts as a branch
757
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
758
index XXXXXXX..XXXXXXX 100644
759
--- a/target/ppc/translate.c
760
+++ b/target/ppc/translate.c
761
@@ -XXX,XX +XXX,XX @@ static TCGv cpu_reserve_val2;
762
static TCGv cpu_fpscr;
763
static TCGv_i32 cpu_access_type;
764
765
-#include "exec/gen-icount.h"
766
-
767
void ppc_translate_init(void)
768
{
769
int i;
770
@@ -XXX,XX +XXX,XX @@ static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
771
772
static void gen_icount_io_start(DisasContext *ctx)
773
{
774
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
775
- gen_io_start();
776
- /*
777
- * An I/O instruction must be last in the TB.
778
- * Chain to the next TB, and let the code from gen_tb_start
779
- * decide if we need to return to the main loop.
780
- * Doing this first also allows this value to be overridden.
781
- */
782
- ctx->base.is_jmp = DISAS_TOO_MANY;
783
- }
784
+ translator_io_start(&ctx->base);
785
}
786
133
#if !defined(CONFIG_USER_ONLY)
787
#if !defined(CONFIG_USER_ONLY)
134
/* if no code remaining, no need to continue to use slow writes */
788
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
135
if (!p->first_tb) {
789
index XXXXXXX..XXXXXXX 100644
136
- invalidate_page_bitmap(p);
790
--- a/target/riscv/translate.c
137
tlb_unprotect_code(start);
791
+++ b/target/riscv/translate.c
792
@@ -XXX,XX +XXX,XX @@ static TCGv load_val;
793
static TCGv pm_mask;
794
static TCGv pm_base;
795
796
-#include "exec/gen-icount.h"
797
-
798
/*
799
* If an operation is being performed on less than TARGET_LONG_BITS,
800
* it may require the inputs to be sign- or zero-extended; which will
801
diff --git a/target/rx/translate.c b/target/rx/translate.c
802
index XXXXXXX..XXXXXXX 100644
803
--- a/target/rx/translate.c
804
+++ b/target/rx/translate.c
805
@@ -XXX,XX +XXX,XX @@ static TCGv_i64 cpu_acc;
806
807
#define cpu_sp cpu_regs[0]
808
809
-#include "exec/gen-icount.h"
810
-
811
/* decoder helper */
812
static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
813
int i, int n)
814
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
815
index XXXXXXX..XXXXXXX 100644
816
--- a/target/s390x/tcg/translate.c
817
+++ b/target/s390x/tcg/translate.c
818
@@ -XXX,XX +XXX,XX @@
819
#include "qemu/log.h"
820
#include "qemu/host-utils.h"
821
#include "exec/cpu_ldst.h"
822
-#include "exec/gen-icount.h"
823
#include "exec/helper-proto.h"
824
#include "exec/helper-gen.h"
825
826
@@ -XXX,XX +XXX,XX @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
827
828
/* input/output is the special case for icount mode */
829
if (unlikely(insn->flags & IF_IO)) {
830
- icount = tb_cflags(s->base.tb) & CF_USE_ICOUNT;
831
- if (icount) {
832
- gen_io_start();
833
- }
834
+ icount = translator_io_start(&s->base);
835
}
836
}
837
838
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
839
index XXXXXXX..XXXXXXX 100644
840
--- a/target/sh4/translate.c
841
+++ b/target/sh4/translate.c
842
@@ -XXX,XX +XXX,XX @@ static TCGv cpu_fregs[32];
843
/* internal register indexes */
844
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;
845
846
-#include "exec/gen-icount.h"
847
-
848
void sh4_translate_init(void)
849
{
850
int i;
851
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
852
index XXXXXXX..XXXXXXX 100644
853
--- a/target/sparc/translate.c
854
+++ b/target/sparc/translate.c
855
@@ -XXX,XX +XXX,XX @@ static TCGv cpu_wim;
856
/* Floating point registers */
857
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
858
859
-#include "exec/gen-icount.h"
860
-
861
typedef struct DisasContext {
862
DisasContextBase base;
863
target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
864
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
865
r_const = tcg_constant_i32(dc->mem_idx);
866
tcg_gen_ld_ptr(r_tickptr, cpu_env,
867
offsetof(CPUSPARCState, tick));
868
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
869
- gen_io_start();
870
+ if (translator_io_start(&dc->base)) {
871
+ dc->base.is_jmp = DISAS_EXIT;
872
}
873
gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
874
r_const);
875
gen_store_gpr(dc, rd, cpu_dst);
876
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
877
- /* I/O operations in icount mode must end the TB */
878
- dc->base.is_jmp = DISAS_EXIT;
879
- }
880
}
881
break;
882
case 0x5: /* V9 rdpc */
883
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
884
r_const = tcg_constant_i32(dc->mem_idx);
885
tcg_gen_ld_ptr(r_tickptr, cpu_env,
886
offsetof(CPUSPARCState, stick));
887
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
888
- gen_io_start();
889
+ if (translator_io_start(&dc->base)) {
890
+ dc->base.is_jmp = DISAS_EXIT;
891
}
892
gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
893
r_const);
894
gen_store_gpr(dc, rd, cpu_dst);
895
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
896
- /* I/O operations in icount mode must end the TB */
897
- dc->base.is_jmp = DISAS_EXIT;
898
- }
899
}
900
break;
901
case 0x19: /* System tick compare */
902
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
903
r_const = tcg_constant_i32(dc->mem_idx);
904
tcg_gen_ld_ptr(r_tickptr, cpu_env,
905
offsetof(CPUSPARCState, tick));
906
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
907
- gen_io_start();
908
+ if (translator_io_start(&dc->base)) {
909
+ dc->base.is_jmp = DISAS_EXIT;
910
}
911
gen_helper_tick_get_count(cpu_tmp0, cpu_env,
912
r_tickptr, r_const);
913
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
914
- /* I/O operations in icount mode must end the TB */
915
- dc->base.is_jmp = DISAS_EXIT;
916
- }
917
}
918
break;
919
case 5: // tba
920
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
921
r_tickptr = tcg_temp_new_ptr();
922
tcg_gen_ld_ptr(r_tickptr, cpu_env,
923
offsetof(CPUSPARCState, tick));
924
- if (tb_cflags(dc->base.tb) &
925
- CF_USE_ICOUNT) {
926
- gen_io_start();
927
- }
928
+ translator_io_start(&dc->base);
929
gen_helper_tick_set_limit(r_tickptr,
930
cpu_tick_cmpr);
931
/* End TB to handle timer interrupt */
932
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
933
r_tickptr = tcg_temp_new_ptr();
934
tcg_gen_ld_ptr(r_tickptr, cpu_env,
935
offsetof(CPUSPARCState, stick));
936
- if (tb_cflags(dc->base.tb) &
937
- CF_USE_ICOUNT) {
938
- gen_io_start();
939
- }
940
+ translator_io_start(&dc->base);
941
gen_helper_tick_set_count(r_tickptr,
942
cpu_tmp0);
943
/* End TB to handle timer interrupt */
944
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
945
r_tickptr = tcg_temp_new_ptr();
946
tcg_gen_ld_ptr(r_tickptr, cpu_env,
947
offsetof(CPUSPARCState, stick));
948
- if (tb_cflags(dc->base.tb) &
949
- CF_USE_ICOUNT) {
950
- gen_io_start();
951
- }
952
+ translator_io_start(&dc->base);
953
gen_helper_tick_set_limit(r_tickptr,
954
cpu_stick_cmpr);
955
/* End TB to handle timer interrupt */
956
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
957
r_tickptr = tcg_temp_new_ptr();
958
tcg_gen_ld_ptr(r_tickptr, cpu_env,
959
offsetof(CPUSPARCState, tick));
960
- if (tb_cflags(dc->base.tb) &
961
- CF_USE_ICOUNT) {
962
- gen_io_start();
963
- }
964
+ translator_io_start(&dc->base);
965
gen_helper_tick_set_count(r_tickptr,
966
cpu_tmp0);
967
/* End TB to handle timer interrupt */
968
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
969
break;
970
case 6: // pstate
971
save_state(dc);
972
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
973
- gen_io_start();
974
- }
975
- gen_helper_wrpstate(cpu_env, cpu_tmp0);
976
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
977
- /* I/O ops in icount mode must end the TB */
978
+ if (translator_io_start(&dc->base)) {
979
dc->base.is_jmp = DISAS_EXIT;
980
}
981
+ gen_helper_wrpstate(cpu_env, cpu_tmp0);
982
dc->npc = DYNAMIC_PC;
983
break;
984
case 7: // tl
985
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
986
dc->npc = DYNAMIC_PC;
987
break;
988
case 8: // pil
989
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
990
- gen_io_start();
991
- }
992
- gen_helper_wrpil(cpu_env, cpu_tmp0);
993
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
994
- /* I/O ops in icount mode must end the TB */
995
+ if (translator_io_start(&dc->base)) {
996
dc->base.is_jmp = DISAS_EXIT;
997
}
998
+ gen_helper_wrpil(cpu_env, cpu_tmp0);
999
break;
1000
case 9: // cwp
1001
gen_helper_wrcwp(cpu_env, cpu_tmp0);
1002
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
1003
r_tickptr = tcg_temp_new_ptr();
1004
tcg_gen_ld_ptr(r_tickptr, cpu_env,
1005
offsetof(CPUSPARCState, hstick));
1006
- if (tb_cflags(dc->base.tb) &
1007
- CF_USE_ICOUNT) {
1008
- gen_io_start();
1009
- }
1010
+ translator_io_start(&dc->base);
1011
gen_helper_tick_set_limit(r_tickptr,
1012
cpu_hstick_cmpr);
1013
/* End TB to handle timer interrupt */
1014
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
1015
goto priv_insn;
1016
dc->npc = DYNAMIC_PC;
1017
dc->pc = DYNAMIC_PC;
1018
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
1019
- gen_io_start();
1020
- }
1021
+ translator_io_start(&dc->base);
1022
gen_helper_done(cpu_env);
1023
goto jmp_insn;
1024
case 1:
1025
@@ -XXX,XX +XXX,XX @@ static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
1026
goto priv_insn;
1027
dc->npc = DYNAMIC_PC;
1028
dc->pc = DYNAMIC_PC;
1029
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
1030
- gen_io_start();
1031
- }
1032
+ translator_io_start(&dc->base);
1033
gen_helper_retry(cpu_env);
1034
goto jmp_insn;
1035
default:
1036
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
1037
index XXXXXXX..XXXXXXX 100644
1038
--- a/target/tricore/translate.c
1039
+++ b/target/tricore/translate.c
1040
@@ -XXX,XX +XXX,XX @@ static TCGv cpu_PSW_SV;
1041
static TCGv cpu_PSW_AV;
1042
static TCGv cpu_PSW_SAV;
1043
1044
-#include "exec/gen-icount.h"
1045
-
1046
static const char *regnames_a[] = {
1047
"a0" , "a1" , "a2" , "a3" , "a4" , "a5" ,
1048
"a6" , "a7" , "a8" , "a9" , "sp" , "a11" ,
1049
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
1050
index XXXXXXX..XXXXXXX 100644
1051
--- a/target/xtensa/translate.c
1052
+++ b/target/xtensa/translate.c
1053
@@ -XXX,XX +XXX,XX @@ static TCGv_i32 cpu_exclusive_val;
1054
1055
static GHashTable *xtensa_regfile_table;
1056
1057
-#include "exec/gen-icount.h"
1058
-
1059
static char *sr_name[256];
1060
static char *ur_name[256];
1061
1062
@@ -XXX,XX +XXX,XX @@ static int gen_postprocess(DisasContext *dc, int slot)
1063
1064
#ifndef CONFIG_USER_ONLY
1065
if (op_flags & XTENSA_OP_CHECK_INTERRUPTS) {
1066
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
1067
- gen_io_start();
1068
- }
1069
+ translator_io_start(&dc->base);
1070
gen_helper_check_interrupts(cpu_env);
138
}
1071
}
139
#endif
1072
#endif
140
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
1073
@@ -XXX,XX +XXX,XX @@ static void translate_rsr_ccount(DisasContext *dc, const OpcodeArg arg[],
1074
const uint32_t par[])
1075
{
1076
#ifndef CONFIG_USER_ONLY
1077
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
1078
- gen_io_start();
1079
- }
1080
+ translator_io_start(&dc->base);
1081
gen_helper_update_ccount(cpu_env);
1082
tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
1083
#endif
1084
@@ -XXX,XX +XXX,XX @@ static void translate_waiti(DisasContext *dc, const OpcodeArg arg[],
1085
#ifndef CONFIG_USER_ONLY
1086
TCGv_i32 pc = tcg_constant_i32(dc->base.pc_next);
1087
1088
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
1089
- gen_io_start();
1090
- }
1091
+ translator_io_start(&dc->base);
1092
gen_helper_waiti(cpu_env, pc, tcg_constant_i32(arg[0].imm));
1093
#endif
1094
}
1095
@@ -XXX,XX +XXX,XX @@ static void translate_wsr_ccompare(DisasContext *dc, const OpcodeArg arg[],
1096
uint32_t id = par[0] - CCOMPARE;
1097
1098
assert(id < dc->config->nccompare);
1099
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
1100
- gen_io_start();
1101
- }
1102
+ translator_io_start(&dc->base);
1103
tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
1104
gen_helper_update_ccompare(cpu_env, tcg_constant_i32(id));
1105
#endif
1106
@@ -XXX,XX +XXX,XX @@ static void translate_wsr_ccount(DisasContext *dc, const OpcodeArg arg[],
1107
const uint32_t par[])
1108
{
1109
#ifndef CONFIG_USER_ONLY
1110
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
1111
- gen_io_start();
1112
- }
1113
+ translator_io_start(&dc->base);
1114
gen_helper_wsr_ccount(cpu_env, arg[0].in);
1115
#endif
1116
}
1117
@@ -XXX,XX +XXX,XX @@ static void translate_xsr_ccount(DisasContext *dc, const OpcodeArg arg[],
1118
#ifndef CONFIG_USER_ONLY
1119
TCGv_i32 tmp = tcg_temp_new_i32();
1120
1121
- if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
1122
- gen_io_start();
1123
- }
1124
-
1125
+ translator_io_start(&dc->base);
1126
gen_helper_update_ccount(cpu_env);
1127
tcg_gen_mov_i32(tmp, cpu_SR[par[0]]);
1128
gen_helper_wsr_ccount(cpu_env, arg[0].in);
1129
diff --git a/target/loongarch/insn_trans/trans_extra.c.inc b/target/loongarch/insn_trans/trans_extra.c.inc
1130
index XXXXXXX..XXXXXXX 100644
1131
--- a/target/loongarch/insn_trans/trans_extra.c.inc
1132
+++ b/target/loongarch/insn_trans/trans_extra.c.inc
1133
@@ -XXX,XX +XXX,XX @@ static bool gen_rdtime(DisasContext *ctx, arg_rr *a,
1134
TCGv dst1 = gpr_dst(ctx, a->rd, EXT_NONE);
1135
TCGv dst2 = gpr_dst(ctx, a->rj, EXT_NONE);
1136
1137
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
1138
- gen_io_start();
1139
- }
1140
+ translator_io_start(&ctx->base);
1141
gen_helper_rdtime_d(dst1, cpu_env);
1142
if (word) {
1143
tcg_gen_sextract_tl(dst1, dst1, high ? 32 : 0, 32);
1144
diff --git a/target/loongarch/insn_trans/trans_privileged.c.inc b/target/loongarch/insn_trans/trans_privileged.c.inc
1145
index XXXXXXX..XXXXXXX 100644
1146
--- a/target/loongarch/insn_trans/trans_privileged.c.inc
1147
+++ b/target/loongarch/insn_trans/trans_privileged.c.inc
1148
@@ -XXX,XX +XXX,XX @@ static bool check_csr_flags(DisasContext *ctx, const CSRInfo *csr, bool write)
1149
if ((csr->flags & CSRFL_READONLY) && write) {
1150
return false;
141
}
1151
}
142
1152
- if ((csr->flags & CSRFL_IO) &&
143
assert_page_locked(p);
1153
- (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT)) {
144
- if (!p->code_bitmap &&
1154
- gen_io_start();
145
- ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
1155
+ if ((csr->flags & CSRFL_IO) && translator_io_start(&ctx->base)) {
146
- build_page_bitmap(p);
1156
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
147
- }
1157
} else if ((csr->flags & CSRFL_EXITTB) && write) {
148
- if (p->code_bitmap) {
1158
ctx->base.is_jmp = DISAS_EXIT_UPDATE;
149
- unsigned int nr;
1159
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
150
- unsigned long b;
1160
index XXXXXXX..XXXXXXX 100644
151
-
1161
--- a/target/riscv/insn_trans/trans_privileged.c.inc
152
- nr = start & ~TARGET_PAGE_MASK;
1162
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
153
- b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
1163
@@ -XXX,XX +XXX,XX @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
154
- if (b & ((1 << len) - 1)) {
1164
#ifndef CONFIG_USER_ONLY
155
- goto do_invalidate;
1165
if (has_ext(ctx, RVS)) {
1166
decode_save_opc(ctx);
1167
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
1168
- gen_io_start();
156
- }
1169
- }
157
- } else {
1170
+ translator_io_start(&ctx->base);
158
- do_invalidate:
1171
gen_helper_sret(cpu_pc, cpu_env);
159
- tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
1172
exit_tb(ctx); /* no chaining */
160
- retaddr);
1173
ctx->base.is_jmp = DISAS_NORETURN;
161
- }
1174
@@ -XXX,XX +XXX,XX @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
162
+ tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
1175
{
163
+ retaddr);
1176
#ifndef CONFIG_USER_ONLY
1177
decode_save_opc(ctx);
1178
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
1179
- gen_io_start();
1180
- }
1181
+ translator_io_start(&ctx->base);
1182
gen_helper_mret(cpu_pc, cpu_env);
1183
exit_tb(ctx); /* no chaining */
1184
ctx->base.is_jmp = DISAS_NORETURN;
1185
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
1186
index XXXXXXX..XXXXXXX 100644
1187
--- a/target/riscv/insn_trans/trans_rvi.c.inc
1188
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
1189
@@ -XXX,XX +XXX,XX @@ static bool do_csrr(DisasContext *ctx, int rd, int rc)
1190
TCGv dest = dest_gpr(ctx, rd);
1191
TCGv_i32 csr = tcg_constant_i32(rc);
1192
1193
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
1194
- gen_io_start();
1195
- }
1196
+ translator_io_start(&ctx->base);
1197
gen_helper_csrr(dest, cpu_env, csr);
1198
gen_set_gpr(ctx, rd, dest);
1199
return do_csr_post(ctx);
1200
@@ -XXX,XX +XXX,XX @@ static bool do_csrw(DisasContext *ctx, int rc, TCGv src)
1201
{
1202
TCGv_i32 csr = tcg_constant_i32(rc);
1203
1204
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
1205
- gen_io_start();
1206
- }
1207
+ translator_io_start(&ctx->base);
1208
gen_helper_csrw(cpu_env, csr, src);
1209
return do_csr_post(ctx);
164
}
1210
}
165
#else
1211
@@ -XXX,XX +XXX,XX @@ static bool do_csrrw(DisasContext *ctx, int rd, int rc, TCGv src, TCGv mask)
166
/* Called with mmap_lock held. If pc is not 0 then it indicates the
1212
TCGv dest = dest_gpr(ctx, rd);
1213
TCGv_i32 csr = tcg_constant_i32(rc);
1214
1215
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
1216
- gen_io_start();
1217
- }
1218
+ translator_io_start(&ctx->base);
1219
gen_helper_csrrw(dest, cpu_env, csr, src, mask);
1220
gen_set_gpr(ctx, rd, dest);
1221
return do_csr_post(ctx);
1222
@@ -XXX,XX +XXX,XX @@ static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
1223
TCGv desth = dest_gprh(ctx, rd);
1224
TCGv_i32 csr = tcg_constant_i32(rc);
1225
1226
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
1227
- gen_io_start();
1228
- }
1229
+ translator_io_start(&ctx->base);
1230
gen_helper_csrr_i128(destl, cpu_env, csr);
1231
tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
1232
gen_set_gpr128(ctx, rd, destl, desth);
1233
@@ -XXX,XX +XXX,XX @@ static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
1234
{
1235
TCGv_i32 csr = tcg_constant_i32(rc);
1236
1237
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
1238
- gen_io_start();
1239
- }
1240
+ translator_io_start(&ctx->base);
1241
gen_helper_csrw_i128(cpu_env, csr, srcl, srch);
1242
return do_csr_post(ctx);
1243
}
1244
@@ -XXX,XX +XXX,XX @@ static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
1245
TCGv desth = dest_gprh(ctx, rd);
1246
TCGv_i32 csr = tcg_constant_i32(rc);
1247
1248
- if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
1249
- gen_io_start();
1250
- }
1251
+ translator_io_start(&ctx->base);
1252
gen_helper_csrrw_i128(destl, cpu_env, csr, srcl, srch, maskl, maskh);
1253
tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
1254
gen_set_gpr128(ctx, rd, destl, desth);
167
--
1255
--
168
2.34.1
1256
2.34.1
169
1257
170
1258
diff view generated by jsdifflib
1
This function has two users, who use it incompatibly.
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
2
In tlb_flush_page_by_mmuidx_async_0, when flushing a
2
3
single page, we need to flush exactly two pages.
3
Now that gen_icount_io_start() is a simple wrapper to
4
In tlb_flush_range_by_mmuidx_async_0, when flushing a
4
translator_io_start(), inline it.
5
range of pages, we need to flush N+1 pages.
5
6
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
This avoids double-flushing of jmp cache pages in a range.
7
Message-Id: <20230602095439.48102-1-philmd@linaro.org>
8
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
10
---
12
accel/tcg/cputlb.c | 25 ++++++++++++++-----------
11
target/ppc/translate.c | 63 ++++++++++++--------------
13
1 file changed, 14 insertions(+), 11 deletions(-)
12
target/ppc/power8-pmu-regs.c.inc | 10 ++--
14
13
target/ppc/translate/branch-impl.c.inc | 2 +-
15
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
14
3 files changed, 35 insertions(+), 40 deletions(-)
15
16
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
16
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/tcg/cputlb.c
18
--- a/target/ppc/translate.c
18
+++ b/accel/tcg/cputlb.c
19
+++ b/target/ppc/translate.c
19
@@ -XXX,XX +XXX,XX @@ static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
20
@@ -XXX,XX +XXX,XX @@ static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
20
}
21
ctx->base.is_jmp = DISAS_NORETURN;
21
}
22
}
22
23
23
-static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
24
-static void gen_icount_io_start(DisasContext *ctx)
24
-{
25
-{
25
- /* Discard jump cache entries for any tb which might potentially
26
- translator_io_start(&ctx->base);
26
- overlap the flushed page. */
27
- tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
28
- tb_jmp_cache_clear_page(cpu, addr);
29
-}
27
-}
30
-
28
-
31
/**
29
#if !defined(CONFIG_USER_ONLY)
32
* tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
30
static void gen_ppc_maybe_interrupt(DisasContext *ctx)
33
* @desc: The CPUTLBDesc portion of the TLB
31
{
34
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
32
- gen_icount_io_start(ctx);
33
+ translator_io_start(&ctx->base);
34
gen_helper_ppc_maybe_interrupt(cpu_env);
35
}
36
#endif
37
@@ -XXX,XX +XXX,XX @@ void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
38
#if !defined(CONFIG_USER_ONLY)
39
void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
40
{
41
- gen_icount_io_start(ctx);
42
+ translator_io_start(&ctx->base);
43
gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
44
}
45
46
void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
47
{
48
- gen_icount_io_start(ctx);
49
+ translator_io_start(&ctx->base);
50
gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
51
}
52
#endif
53
@@ -XXX,XX +XXX,XX @@ void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
54
/* Time base */
55
void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
56
{
57
- gen_icount_io_start(ctx);
58
+ translator_io_start(&ctx->base);
59
gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
60
}
61
62
void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
63
{
64
- gen_icount_io_start(ctx);
65
+ translator_io_start(&ctx->base);
66
gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
67
}
68
69
@@ -XXX,XX +XXX,XX @@ void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
70
#if !defined(CONFIG_USER_ONLY)
71
void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
72
{
73
- gen_icount_io_start(ctx);
74
+ translator_io_start(&ctx->base);
75
gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
76
}
77
78
void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
79
{
80
- gen_icount_io_start(ctx);
81
+ translator_io_start(&ctx->base);
82
gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
83
}
84
85
@@ -XXX,XX +XXX,XX @@ void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
86
#if defined(TARGET_PPC64)
87
void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
88
{
89
- gen_icount_io_start(ctx);
90
+ translator_io_start(&ctx->base);
91
gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
92
}
93
94
void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
95
{
96
- gen_icount_io_start(ctx);
97
+ translator_io_start(&ctx->base);
98
gen_helper_store_purr(cpu_env, cpu_gpr[gprn]);
99
}
100
101
/* HDECR */
102
void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
103
{
104
- gen_icount_io_start(ctx);
105
+ translator_io_start(&ctx->base);
106
gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
107
}
108
109
void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
110
{
111
- gen_icount_io_start(ctx);
112
+ translator_io_start(&ctx->base);
113
gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
114
}
115
116
void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
117
{
118
- gen_icount_io_start(ctx);
119
+ translator_io_start(&ctx->base);
120
gen_helper_load_vtb(cpu_gpr[gprn], cpu_env);
121
}
122
123
void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
124
{
125
- gen_icount_io_start(ctx);
126
+ translator_io_start(&ctx->base);
127
gen_helper_store_vtb(cpu_env, cpu_gpr[gprn]);
128
}
129
130
void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
131
{
132
- gen_icount_io_start(ctx);
133
+ translator_io_start(&ctx->base);
134
gen_helper_store_tbu40(cpu_env, cpu_gpr[gprn]);
135
}
136
137
@@ -XXX,XX +XXX,XX @@ void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
138
#if !defined(CONFIG_USER_ONLY)
139
void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn)
140
{
141
- gen_icount_io_start(ctx);
142
+ translator_io_start(&ctx->base);
143
gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env);
144
}
145
146
void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn)
147
{
148
- gen_icount_io_start(ctx);
149
+ translator_io_start(&ctx->base);
150
gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]);
151
}
152
153
void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn)
154
{
155
- gen_icount_io_start(ctx);
156
+ translator_io_start(&ctx->base);
157
gen_store_spr(sprn, cpu_gpr[gprn]);
158
gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]);
159
/* We must stop translation as we may have rebooted */
160
@@ -XXX,XX +XXX,XX @@ void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn)
161
162
void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn)
163
{
164
- gen_icount_io_start(ctx);
165
+ translator_io_start(&ctx->base);
166
gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]);
167
}
168
169
void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn)
170
{
171
- gen_icount_io_start(ctx);
172
+ translator_io_start(&ctx->base);
173
gen_helper_store_40x_tcr(cpu_env, cpu_gpr[gprn]);
174
}
175
176
void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn)
177
{
178
- gen_icount_io_start(ctx);
179
+ translator_io_start(&ctx->base);
180
gen_helper_store_40x_tsr(cpu_env, cpu_gpr[gprn]);
181
}
182
183
@@ -XXX,XX +XXX,XX @@ void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn)
184
185
void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
186
{
187
- gen_icount_io_start(ctx);
188
+ translator_io_start(&ctx->base);
189
gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]);
190
}
191
192
void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
193
{
194
- gen_icount_io_start(ctx);
195
+ translator_io_start(&ctx->base);
196
gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]);
197
}
198
#endif
199
@@ -XXX,XX +XXX,XX @@ static void gen_darn(DisasContext *ctx)
200
if (l > 2) {
201
tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
202
} else {
203
- gen_icount_io_start(ctx);
204
+ translator_io_start(&ctx->base);
205
if (l == 0) {
206
gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
207
} else {
208
@@ -XXX,XX +XXX,XX @@ static void pmu_count_insns(DisasContext *ctx)
209
* running with icount and we do not handle it beforehand,
210
* the helper can trigger a 'bad icount read'.
211
*/
212
- gen_icount_io_start(ctx);
213
+ translator_io_start(&ctx->base);
214
215
/* Avoid helper calls when only PMC5-6 are enabled. */
216
if (!ctx->pmc_other) {
217
@@ -XXX,XX +XXX,XX @@ static void gen_rfi(DisasContext *ctx)
35
}
218
}
36
qemu_spin_unlock(&env_tlb(env)->c.lock);
219
/* Restore CPU state */
37
220
CHK_SV(ctx);
38
- tb_flush_jmp_cache(cpu, addr);
221
- gen_icount_io_start(ctx);
39
+ /*
222
+ translator_io_start(&ctx->base);
40
+ * Discard jump cache entries for any tb which might potentially
223
gen_update_cfar(ctx, ctx->cia);
41
+ * overlap the flushed page, which includes the previous.
224
gen_helper_rfi(cpu_env);
42
+ */
225
ctx->base.is_jmp = DISAS_EXIT;
43
+ tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
226
@@ -XXX,XX +XXX,XX @@ static void gen_rfid(DisasContext *ctx)
44
+ tb_jmp_cache_clear_page(cpu, addr);
227
#else
45
}
228
/* Restore CPU state */
46
229
CHK_SV(ctx);
47
/**
230
- gen_icount_io_start(ctx);
48
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
231
+ translator_io_start(&ctx->base);
49
return;
232
gen_update_cfar(ctx, ctx->cia);
50
}
233
gen_helper_rfid(cpu_env);
51
234
ctx->base.is_jmp = DISAS_EXIT;
52
- for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
235
@@ -XXX,XX +XXX,XX @@ static void gen_rfscv(DisasContext *ctx)
53
- tb_flush_jmp_cache(cpu, d.addr + i);
236
#else
54
+ /*
237
/* Restore CPU state */
55
+ * Discard jump cache entries for any tb which might potentially
238
CHK_SV(ctx);
56
+ * overlap the flushed pages, which includes the previous.
239
- gen_icount_io_start(ctx);
57
+ */
240
+ translator_io_start(&ctx->base);
58
+ d.addr -= TARGET_PAGE_SIZE;
241
gen_update_cfar(ctx, ctx->cia);
59
+ for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
242
gen_helper_rfscv(cpu_env);
60
+ tb_jmp_cache_clear_page(cpu, d.addr);
243
ctx->base.is_jmp = DISAS_EXIT;
61
+ d.addr += TARGET_PAGE_SIZE;
244
@@ -XXX,XX +XXX,XX @@ static void gen_mtmsrd(DisasContext *ctx)
62
}
245
t0 = tcg_temp_new();
63
}
246
t1 = tcg_temp_new();
247
248
- gen_icount_io_start(ctx);
249
+ translator_io_start(&ctx->base);
250
251
if (ctx->opcode & 0x00010000) {
252
/* L=1 form only updates EE and RI */
253
@@ -XXX,XX +XXX,XX @@ static void gen_mtmsr(DisasContext *ctx)
254
t0 = tcg_temp_new();
255
t1 = tcg_temp_new();
256
257
- gen_icount_io_start(ctx);
258
+ translator_io_start(&ctx->base);
259
if (ctx->opcode & 0x00010000) {
260
/* L=1 form only updates EE and RI */
261
mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE);
262
diff --git a/target/ppc/power8-pmu-regs.c.inc b/target/ppc/power8-pmu-regs.c.inc
263
index XXXXXXX..XXXXXXX 100644
264
--- a/target/ppc/power8-pmu-regs.c.inc
265
+++ b/target/ppc/power8-pmu-regs.c.inc
266
@@ -XXX,XX +XXX,XX @@ static void write_MMCR0_common(DisasContext *ctx, TCGv val)
267
/*
268
* helper_store_mmcr0 will make clock based operations that
269
* will cause 'bad icount read' errors if we do not execute
270
- * gen_icount_io_start() beforehand.
271
+ * translator_io_start() beforehand.
272
*/
273
- gen_icount_io_start(ctx);
274
+ translator_io_start(&ctx->base);
275
gen_helper_store_mmcr0(cpu_env, val);
276
277
/*
278
@@ -XXX,XX +XXX,XX @@ void spr_read_PMC(DisasContext *ctx, int gprn, int sprn)
279
{
280
TCGv_i32 t_sprn = tcg_constant_i32(sprn);
281
282
- gen_icount_io_start(ctx);
283
+ translator_io_start(&ctx->base);
284
gen_helper_read_pmc(cpu_gpr[gprn], cpu_env, t_sprn);
285
}
286
287
@@ -XXX,XX +XXX,XX @@ void spr_write_PMC(DisasContext *ctx, int sprn, int gprn)
288
{
289
TCGv_i32 t_sprn = tcg_constant_i32(sprn);
290
291
- gen_icount_io_start(ctx);
292
+ translator_io_start(&ctx->base);
293
gen_helper_store_pmc(cpu_env, t_sprn, cpu_gpr[gprn]);
294
}
295
296
@@ -XXX,XX +XXX,XX @@ void spr_write_MMCR0(DisasContext *ctx, int sprn, int gprn)
297
298
void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn)
299
{
300
- gen_icount_io_start(ctx);
301
+ translator_io_start(&ctx->base);
302
gen_helper_store_mmcr1(cpu_env, cpu_gpr[gprn]);
303
}
304
#else
305
diff --git a/target/ppc/translate/branch-impl.c.inc b/target/ppc/translate/branch-impl.c.inc
306
index XXXXXXX..XXXXXXX 100644
307
--- a/target/ppc/translate/branch-impl.c.inc
308
+++ b/target/ppc/translate/branch-impl.c.inc
309
@@ -XXX,XX +XXX,XX @@ static bool trans_RFEBB(DisasContext *ctx, arg_XL_s *arg)
310
{
311
REQUIRE_INSNS_FLAGS2(ctx, ISA207S);
312
313
- gen_icount_io_start(ctx);
314
+ translator_io_start(&ctx->base);
315
gen_update_cfar(ctx, ctx->cia);
316
gen_helper_rfebb(cpu_env, cpu_gpr[arg->s]);
64
317
65
--
318
--
66
2.34.1
319
2.34.1
67
320
68
321
diff view generated by jsdifflib
New patch
1
This is used by exactly one host in extraordinary circumstances.
2
This means that translator.h need not include plugin-gen.h;
3
translator.c already includes plugin-gen.h.
1
4
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
include/exec/translator.h | 8 +-------
9
accel/tcg/translator.c | 5 +++++
10
2 files changed, 6 insertions(+), 7 deletions(-)
11
12
diff --git a/include/exec/translator.h b/include/exec/translator.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/include/exec/translator.h
15
+++ b/include/exec/translator.h
16
@@ -XXX,XX +XXX,XX @@
17
#include "qemu/bswap.h"
18
#include "exec/exec-all.h"
19
#include "exec/cpu_ldst.h"
20
-#include "exec/plugin-gen.h"
21
#include "exec/translate-all.h"
22
#include "tcg/tcg.h"
23
24
@@ -XXX,XX +XXX,XX @@ translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
25
* re-synthesised for s390x "ex"). It ensures we update other areas of
26
* the translator with details of the executed instruction.
27
*/
28
-
29
-static inline void translator_fake_ldb(uint8_t insn8, abi_ptr pc)
30
-{
31
- plugin_insn_append(pc, &insn8, sizeof(insn8));
32
-}
33
-
34
+void translator_fake_ldb(uint8_t insn8, abi_ptr pc);
35
36
/*
37
* Return whether addr is on the same page as where disassembly started.
38
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/accel/tcg/translator.c
41
+++ b/accel/tcg/translator.c
42
@@ -XXX,XX +XXX,XX @@ uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
43
plugin_insn_append(pc, &plug, sizeof(ret));
44
return ret;
45
}
46
+
47
+void translator_fake_ldb(uint8_t insn8, abi_ptr pc)
48
+{
49
+ plugin_insn_append(pc, &insn8, sizeof(insn8));
50
+}
51
--
52
2.34.1
53
54
diff view generated by jsdifflib
New patch
1
Move most includes from *translate*.c to translate.h, ensuring
2
that we get the ordering correct. Ensure cpu.h is first.
3
Use disas/disas.h instead of exec/log.h.
4
Drop otherwise unused includes.
1
5
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
target/arm/tcg/translate.h | 3 +++
10
target/arm/tcg/translate-a64.c | 17 +++++------------
11
target/arm/tcg/translate-m-nocp.c | 2 --
12
target/arm/tcg/translate-mve.c | 3 ---
13
target/arm/tcg/translate-neon.c | 3 ---
14
target/arm/tcg/translate-sme.c | 6 ------
15
target/arm/tcg/translate-sve.c | 9 ---------
16
target/arm/tcg/translate-vfp.c | 3 ---
17
target/arm/tcg/translate.c | 17 +++++------------
18
9 files changed, 13 insertions(+), 50 deletions(-)
19
20
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/arm/tcg/translate.h
23
+++ b/target/arm/tcg/translate.h
24
@@ -XXX,XX +XXX,XX @@
25
#ifndef TARGET_ARM_TRANSLATE_H
26
#define TARGET_ARM_TRANSLATE_H
27
28
+#include "cpu.h"
29
+#include "tcg/tcg-op.h"
30
+#include "tcg/tcg-op-gvec.h"
31
#include "exec/translator.h"
32
#include "exec/helper-gen.h"
33
#include "internals.h"
34
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/target/arm/tcg/translate-a64.c
37
+++ b/target/arm/tcg/translate-a64.c
38
@@ -XXX,XX +XXX,XX @@
39
*/
40
#include "qemu/osdep.h"
41
42
-#include "cpu.h"
43
-#include "exec/exec-all.h"
44
-#include "tcg/tcg-op.h"
45
-#include "tcg/tcg-op-gvec.h"
46
-#include "qemu/log.h"
47
-#include "arm_ldst.h"
48
#include "translate.h"
49
-#include "internals.h"
50
-#include "qemu/host-utils.h"
51
-#include "semihosting/semihost.h"
52
-#include "exec/log.h"
53
-#include "cpregs.h"
54
#include "translate-a64.h"
55
-#include "qemu/atomic128.h"
56
+#include "qemu/log.h"
57
+#include "disas/disas.h"
58
+#include "arm_ldst.h"
59
+#include "semihosting/semihost.h"
60
+#include "cpregs.h"
61
62
static TCGv_i64 cpu_X[32];
63
static TCGv_i64 cpu_pc;
64
diff --git a/target/arm/tcg/translate-m-nocp.c b/target/arm/tcg/translate-m-nocp.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/target/arm/tcg/translate-m-nocp.c
67
+++ b/target/arm/tcg/translate-m-nocp.c
68
@@ -XXX,XX +XXX,XX @@
69
*/
70
71
#include "qemu/osdep.h"
72
-#include "tcg/tcg-op.h"
73
-#include "tcg/tcg-op-gvec.h"
74
#include "translate.h"
75
#include "translate-a32.h"
76
77
diff --git a/target/arm/tcg/translate-mve.c b/target/arm/tcg/translate-mve.c
78
index XXXXXXX..XXXXXXX 100644
79
--- a/target/arm/tcg/translate-mve.c
80
+++ b/target/arm/tcg/translate-mve.c
81
@@ -XXX,XX +XXX,XX @@
82
*/
83
84
#include "qemu/osdep.h"
85
-#include "tcg/tcg-op.h"
86
-#include "tcg/tcg-op-gvec.h"
87
-#include "exec/exec-all.h"
88
#include "translate.h"
89
#include "translate-a32.h"
90
91
diff --git a/target/arm/tcg/translate-neon.c b/target/arm/tcg/translate-neon.c
92
index XXXXXXX..XXXXXXX 100644
93
--- a/target/arm/tcg/translate-neon.c
94
+++ b/target/arm/tcg/translate-neon.c
95
@@ -XXX,XX +XXX,XX @@
96
*/
97
98
#include "qemu/osdep.h"
99
-#include "tcg/tcg-op.h"
100
-#include "tcg/tcg-op-gvec.h"
101
-#include "exec/exec-all.h"
102
#include "translate.h"
103
#include "translate-a32.h"
104
105
diff --git a/target/arm/tcg/translate-sme.c b/target/arm/tcg/translate-sme.c
106
index XXXXXXX..XXXXXXX 100644
107
--- a/target/arm/tcg/translate-sme.c
108
+++ b/target/arm/tcg/translate-sme.c
109
@@ -XXX,XX +XXX,XX @@
110
*/
111
112
#include "qemu/osdep.h"
113
-#include "cpu.h"
114
-#include "tcg/tcg-op.h"
115
-#include "tcg/tcg-op-gvec.h"
116
-#include "tcg/tcg-gvec-desc.h"
117
#include "translate.h"
118
#include "translate-a64.h"
119
-#include "fpu/softfloat.h"
120
-
121
122
/*
123
* Include the generated decoder.
124
diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c
125
index XXXXXXX..XXXXXXX 100644
126
--- a/target/arm/tcg/translate-sve.c
127
+++ b/target/arm/tcg/translate-sve.c
128
@@ -XXX,XX +XXX,XX @@
129
*/
130
131
#include "qemu/osdep.h"
132
-#include "cpu.h"
133
-#include "exec/exec-all.h"
134
-#include "tcg/tcg-op.h"
135
-#include "tcg/tcg-op-gvec.h"
136
-#include "tcg/tcg-gvec-desc.h"
137
-#include "qemu/log.h"
138
-#include "arm_ldst.h"
139
#include "translate.h"
140
-#include "internals.h"
141
-#include "exec/log.h"
142
#include "translate-a64.h"
143
#include "fpu/softfloat.h"
144
145
diff --git a/target/arm/tcg/translate-vfp.c b/target/arm/tcg/translate-vfp.c
146
index XXXXXXX..XXXXXXX 100644
147
--- a/target/arm/tcg/translate-vfp.c
148
+++ b/target/arm/tcg/translate-vfp.c
149
@@ -XXX,XX +XXX,XX @@
150
*/
151
152
#include "qemu/osdep.h"
153
-#include "tcg/tcg-op.h"
154
-#include "tcg/tcg-op-gvec.h"
155
-#include "exec/exec-all.h"
156
#include "translate.h"
157
#include "translate-a32.h"
158
159
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
160
index XXXXXXX..XXXXXXX 100644
161
--- a/target/arm/tcg/translate.c
162
+++ b/target/arm/tcg/translate.c
163
@@ -XXX,XX +XXX,XX @@
164
*/
165
#include "qemu/osdep.h"
166
167
-#include "cpu.h"
168
-#include "internals.h"
169
-#include "disas/disas.h"
170
-#include "exec/exec-all.h"
171
-#include "tcg/tcg-op.h"
172
-#include "tcg/tcg-op-gvec.h"
173
-#include "qemu/log.h"
174
-#include "qemu/bitops.h"
175
-#include "arm_ldst.h"
176
-#include "semihosting/semihost.h"
177
-#include "exec/log.h"
178
-#include "cpregs.h"
179
#include "translate.h"
180
#include "translate-a32.h"
181
+#include "qemu/log.h"
182
+#include "disas/disas.h"
183
+#include "arm_ldst.h"
184
+#include "semihosting/semihost.h"
185
+#include "cpregs.h"
186
#include "exec/helper-proto.h"
187
188
#define HELPER_H "helper.h"
189
--
190
2.34.1
191
192
diff view generated by jsdifflib
1
Allow the target to cache items from the guest page tables.
1
Move most includes from *translate*.c to translate.h, ensuring
2
that we get the ordering correct. Ensure cpu.h is first.
3
Use disas/disas.h instead of exec/log.h.
4
Drop otherwise unused includes.
2
5
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
---
8
include/exec/cpu-defs.h | 9 +++++++++
9
target/mips/tcg/translate.h | 6 ++++--
9
1 file changed, 9 insertions(+)
10
target/mips/tcg/msa_translate.c | 3 ---
11
target/mips/tcg/mxu_translate.c | 2 --
12
target/mips/tcg/octeon_translate.c | 4 +---
13
target/mips/tcg/rel6_translate.c | 2 --
14
target/mips/tcg/translate.c | 18 ++++++------------
15
target/mips/tcg/translate_addr_const.c | 1 -
16
target/mips/tcg/tx79_translate.c | 4 +---
17
target/mips/tcg/vr54xx_translate.c | 3 ---
18
9 files changed, 12 insertions(+), 31 deletions(-)
10
19
11
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
20
diff --git a/target/mips/tcg/translate.h b/target/mips/tcg/translate.h
12
index XXXXXXX..XXXXXXX 100644
21
index XXXXXXX..XXXXXXX 100644
13
--- a/include/exec/cpu-defs.h
22
--- a/target/mips/tcg/translate.h
14
+++ b/include/exec/cpu-defs.h
23
+++ b/target/mips/tcg/translate.h
15
@@ -XXX,XX +XXX,XX @@ typedef struct CPUTLBEntryFull {
24
@@ -XXX,XX +XXX,XX @@
16
25
#ifndef TARGET_MIPS_TRANSLATE_H
17
/* @lg_page_size contains the log2 of the page size. */
26
#define TARGET_MIPS_TRANSLATE_H
18
uint8_t lg_page_size;
27
19
+
28
-#include "qemu/log.h"
20
+ /*
29
-#include "exec/translator.h"
21
+ * Allow target-specific additions to this structure.
30
+#include "cpu.h"
22
+ * This may be used to cache items from the guest cpu
31
#include "tcg/tcg-op.h"
23
+ * page tables for later use by the implementation.
32
+#include "exec/translator.h"
24
+ */
33
+#include "exec/helper-gen.h"
25
+#ifdef TARGET_PAGE_ENTRY_EXTRA
34
+#include "qemu/log.h"
26
+ TARGET_PAGE_ENTRY_EXTRA
35
27
+#endif
36
#define MIPS_DEBUG_DISAS 0
28
} CPUTLBEntryFull;
37
38
diff --git a/target/mips/tcg/msa_translate.c b/target/mips/tcg/msa_translate.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/target/mips/tcg/msa_translate.c
41
+++ b/target/mips/tcg/msa_translate.c
42
@@ -XXX,XX +XXX,XX @@
43
* SPDX-License-Identifier: LGPL-2.1-or-later
44
*/
45
#include "qemu/osdep.h"
46
-#include "tcg/tcg-op.h"
47
-#include "exec/helper-gen.h"
48
#include "translate.h"
49
#include "fpu_helper.h"
50
-#include "internal.h"
51
52
static int elm_n(DisasContext *ctx, int x);
53
static int elm_df(DisasContext *ctx, int x);
54
diff --git a/target/mips/tcg/mxu_translate.c b/target/mips/tcg/mxu_translate.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/target/mips/tcg/mxu_translate.c
57
+++ b/target/mips/tcg/mxu_translate.c
58
@@ -XXX,XX +XXX,XX @@
59
*/
60
61
#include "qemu/osdep.h"
62
-#include "tcg/tcg-op.h"
63
-#include "exec/helper-gen.h"
64
#include "translate.h"
29
65
30
/*
66
/*
67
diff --git a/target/mips/tcg/octeon_translate.c b/target/mips/tcg/octeon_translate.c
68
index XXXXXXX..XXXXXXX 100644
69
--- a/target/mips/tcg/octeon_translate.c
70
+++ b/target/mips/tcg/octeon_translate.c
71
@@ -XXX,XX +XXX,XX @@
72
*/
73
74
#include "qemu/osdep.h"
75
-#include "tcg/tcg-op.h"
76
-#include "tcg/tcg-op-gvec.h"
77
-#include "exec/helper-gen.h"
78
#include "translate.h"
79
+#include "tcg/tcg-op-gvec.h"
80
81
/* Include the auto-generated decoder. */
82
#include "decode-octeon.c.inc"
83
diff --git a/target/mips/tcg/rel6_translate.c b/target/mips/tcg/rel6_translate.c
84
index XXXXXXX..XXXXXXX 100644
85
--- a/target/mips/tcg/rel6_translate.c
86
+++ b/target/mips/tcg/rel6_translate.c
87
@@ -XXX,XX +XXX,XX @@
88
*/
89
90
#include "qemu/osdep.h"
91
-#include "tcg/tcg-op.h"
92
-#include "exec/helper-gen.h"
93
#include "translate.h"
94
95
/* Include the auto-generated decoders. */
96
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
97
index XXXXXXX..XXXXXXX 100644
98
--- a/target/mips/tcg/translate.c
99
+++ b/target/mips/tcg/translate.c
100
@@ -XXX,XX +XXX,XX @@
101
*/
102
103
#include "qemu/osdep.h"
104
-#include "cpu.h"
105
-#include "internal.h"
106
-#include "tcg/tcg-op.h"
107
-#include "exec/translator.h"
108
-#include "exec/helper-proto.h"
109
-#include "exec/helper-gen.h"
110
-#include "semihosting/semihost.h"
111
-
112
-#include "trace.h"
113
-#include "exec/log.h"
114
-#include "qemu/qemu-print.h"
115
-#include "fpu_helper.h"
116
#include "translate.h"
117
+#include "internal.h"
118
+#include "exec/helper-proto.h"
119
+#include "semihosting/semihost.h"
120
+#include "trace.h"
121
+#include "disas/disas.h"
122
+#include "fpu_helper.h"
123
124
#define HELPER_H "helper.h"
125
#include "exec/helper-info.c.inc"
126
diff --git a/target/mips/tcg/translate_addr_const.c b/target/mips/tcg/translate_addr_const.c
127
index XXXXXXX..XXXXXXX 100644
128
--- a/target/mips/tcg/translate_addr_const.c
129
+++ b/target/mips/tcg/translate_addr_const.c
130
@@ -XXX,XX +XXX,XX @@
131
* SPDX-License-Identifier: LGPL-2.1-or-later
132
*/
133
#include "qemu/osdep.h"
134
-#include "tcg/tcg-op.h"
135
#include "translate.h"
136
137
bool gen_lsa(DisasContext *ctx, int rd, int rt, int rs, int sa)
138
diff --git a/target/mips/tcg/tx79_translate.c b/target/mips/tcg/tx79_translate.c
139
index XXXXXXX..XXXXXXX 100644
140
--- a/target/mips/tcg/tx79_translate.c
141
+++ b/target/mips/tcg/tx79_translate.c
142
@@ -XXX,XX +XXX,XX @@
143
*/
144
145
#include "qemu/osdep.h"
146
-#include "tcg/tcg-op.h"
147
-#include "tcg/tcg-op-gvec.h"
148
-#include "exec/helper-gen.h"
149
#include "translate.h"
150
+#include "tcg/tcg-op-gvec.h"
151
152
/* Include the auto-generated decoder. */
153
#include "decode-tx79.c.inc"
154
diff --git a/target/mips/tcg/vr54xx_translate.c b/target/mips/tcg/vr54xx_translate.c
155
index XXXXXXX..XXXXXXX 100644
156
--- a/target/mips/tcg/vr54xx_translate.c
157
+++ b/target/mips/tcg/vr54xx_translate.c
158
@@ -XXX,XX +XXX,XX @@
159
*/
160
161
#include "qemu/osdep.h"
162
-#include "tcg/tcg-op.h"
163
-#include "exec/helper-gen.h"
164
#include "translate.h"
165
-#include "internal.h"
166
167
/* Include the auto-generated decoder. */
168
#include "decode-vr54xx.c.inc"
31
--
169
--
32
2.34.1
170
2.34.1
33
171
34
172
diff view generated by jsdifflib
New patch
1
This had been pulled in via exec/exec-all.h, via exec/translator.h,
2
but the include of exec-all.h will be removed.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/hexagon/translate.c | 1 +
8
target/loongarch/translate.c | 3 +--
9
target/mips/tcg/translate.c | 1 +
10
3 files changed, 3 insertions(+), 2 deletions(-)
11
12
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/target/hexagon/translate.c
15
+++ b/target/hexagon/translate.c
16
@@ -XXX,XX +XXX,XX @@
17
#include "tcg/tcg-op-gvec.h"
18
#include "exec/helper-gen.h"
19
#include "exec/helper-proto.h"
20
+#include "exec/translation-block.h"
21
#include "exec/cpu_ldst.h"
22
#include "exec/log.h"
23
#include "internal.h"
24
diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/target/loongarch/translate.c
27
+++ b/target/loongarch/translate.c
28
@@ -XXX,XX +XXX,XX @@
29
#include "cpu.h"
30
#include "tcg/tcg-op.h"
31
#include "tcg/tcg-op-gvec.h"
32
-
33
+#include "exec/translation-block.h"
34
#include "exec/translator.h"
35
#include "exec/helper-proto.h"
36
#include "exec/helper-gen.h"
37
-
38
#include "exec/log.h"
39
#include "qemu/qemu-print.h"
40
#include "fpu/softfloat.h"
41
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/mips/tcg/translate.c
44
+++ b/target/mips/tcg/translate.c
45
@@ -XXX,XX +XXX,XX @@
46
#include "translate.h"
47
#include "internal.h"
48
#include "exec/helper-proto.h"
49
+#include "exec/translation-block.h"
50
#include "semihosting/semihost.h"
51
#include "trace.h"
52
#include "disas/disas.h"
53
--
54
2.34.1
55
56
diff view generated by jsdifflib
New patch
1
This had been pulled in via exec/translator.h,
2
but the include of exec-all.h will be removed.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
target/arm/tcg/translate.h | 1 +
8
1 file changed, 1 insertion(+)
9
10
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/target/arm/tcg/translate.h
13
+++ b/target/arm/tcg/translate.h
14
@@ -XXX,XX +XXX,XX @@
15
#include "cpu.h"
16
#include "tcg/tcg-op.h"
17
#include "tcg/tcg-op-gvec.h"
18
+#include "exec/exec-all.h"
19
#include "exec/translator.h"
20
#include "exec/helper-gen.h"
21
#include "internals.h"
22
--
23
2.34.1
24
25
diff view generated by jsdifflib
New patch
1
Reduce the header to only bswap.h and cpu_ldst.h.
2
Move exec/translate-all.h to translator.c.
3
Reduce tcg.h and tcg-op.h to tcg-op-common.h.
4
Remove otherwise unused headers.
1
5
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
include/exec/translator.h | 6 +-----
10
accel/tcg/translator.c | 8 +++-----
11
2 files changed, 4 insertions(+), 10 deletions(-)
12
13
diff --git a/include/exec/translator.h b/include/exec/translator.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/translator.h
16
+++ b/include/exec/translator.h
17
@@ -XXX,XX +XXX,XX @@
18
* member in your target-specific DisasContext.
19
*/
20
21
-
22
#include "qemu/bswap.h"
23
-#include "exec/exec-all.h"
24
-#include "exec/cpu_ldst.h"
25
-#include "exec/translate-all.h"
26
-#include "tcg/tcg.h"
27
+#include "exec/cpu_ldst.h"    /* for abi_ptr */
28
29
/**
30
* gen_intermediate_code
31
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
32
index XXXXXXX..XXXXXXX 100644
33
--- a/accel/tcg/translator.c
34
+++ b/accel/tcg/translator.c
35
@@ -XXX,XX +XXX,XX @@
36
*/
37
38
#include "qemu/osdep.h"
39
+#include "qemu/log.h"
40
#include "qemu/error-report.h"
41
-#include "tcg/tcg.h"
42
-#include "tcg/tcg-op.h"
43
#include "exec/exec-all.h"
44
-#include "exec/log.h"
45
#include "exec/translator.h"
46
+#include "exec/translate-all.h"
47
#include "exec/plugin-gen.h"
48
-#include "exec/replay-core.h"
49
-
50
+#include "tcg/tcg-op-common.h"
51
52
static void gen_io_start(void)
53
{
54
--
55
2.34.1
56
57
diff view generated by jsdifflib
New patch
1
The bug was hidden because they happen to have the same values.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/region.c | 18 +++++++++++++-----
7
1 file changed, 13 insertions(+), 5 deletions(-)
8
9
diff --git a/tcg/region.c b/tcg/region.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/region.c
12
+++ b/tcg/region.c
13
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
14
return PROT_READ | PROT_WRITE;
15
}
16
#elif defined(_WIN32)
17
+/*
18
+ * Local source-level compatibility with Unix.
19
+ * Used by tcg_region_init below.
20
+ */
21
+#define PROT_READ 1
22
+#define PROT_WRITE 2
23
+#define PROT_EXEC 4
24
+
25
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
26
{
27
void *buf;
28
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
29
region.start_aligned = buf;
30
region.total_size = size;
31
32
- return PAGE_READ | PAGE_WRITE | PAGE_EXEC;
33
+ return PROT_READ | PROT_WRITE | PROT_EXEC;
34
}
35
#else
36
static int alloc_code_gen_buffer_anon(size_t size, int prot,
37
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
38
* buffer -- let that one use hugepages throughout.
39
* Work with the page protections set up with the initial mapping.
40
*/
41
- need_prot = PAGE_READ | PAGE_WRITE;
42
+ need_prot = PROT_READ | PROT_WRITE;
43
#ifndef CONFIG_TCG_INTERPRETER
44
if (tcg_splitwx_diff == 0) {
45
- need_prot |= PAGE_EXEC;
46
+ need_prot |= PROT_EXEC;
47
}
48
#endif
49
for (size_t i = 0, n = region.n; i < n; i++) {
50
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
51
if (have_prot != need_prot) {
52
int rc;
53
54
- if (need_prot == (PAGE_READ | PAGE_WRITE | PAGE_EXEC)) {
55
+ if (need_prot == (PROT_READ | PROT_WRITE | PROT_EXEC)) {
56
rc = qemu_mprotect_rwx(start, end - start);
57
- } else if (need_prot == (PAGE_READ | PAGE_WRITE)) {
58
+ } else if (need_prot == (PROT_READ | PROT_WRITE)) {
59
rc = qemu_mprotect_rw(start, end - start);
60
} else {
61
g_assert_not_reached();
62
--
63
2.34.1
64
65
diff view generated by jsdifflib
New patch
1
Since the change to CPUArchState, we have a common typedef
2
that can always be used.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/exec/helper-head.h | 6 +++---
8
1 file changed, 3 insertions(+), 3 deletions(-)
9
10
diff --git a/include/exec/helper-head.h b/include/exec/helper-head.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/include/exec/helper-head.h
13
+++ b/include/exec/helper-head.h
14
@@ -XXX,XX +XXX,XX @@
15
#define dh_alias_f64 i64
16
#define dh_alias_ptr ptr
17
#define dh_alias_cptr ptr
18
+#define dh_alias_env ptr
19
#define dh_alias_void void
20
#define dh_alias_noreturn noreturn
21
#define dh_alias(t) glue(dh_alias_, t)
22
@@ -XXX,XX +XXX,XX @@
23
#define dh_ctype_f64 float64
24
#define dh_ctype_ptr void *
25
#define dh_ctype_cptr const void *
26
+#define dh_ctype_env CPUArchState *
27
#define dh_ctype_void void
28
#define dh_ctype_noreturn G_NORETURN void
29
#define dh_ctype(t) dh_ctype_##t
30
@@ -XXX,XX +XXX,XX @@
31
# endif
32
# endif
33
# define dh_ctype_tl target_ulong
34
-# define dh_alias_env ptr
35
-# define dh_ctype_env CPUArchState *
36
-# define dh_typecode_env dh_typecode_ptr
37
#endif
38
39
/* We can't use glue() here because it falls foul of C preprocessor
40
@@ -XXX,XX +XXX,XX @@
41
#define dh_typecode_f32 dh_typecode_i32
42
#define dh_typecode_f64 dh_typecode_i64
43
#define dh_typecode_cptr dh_typecode_ptr
44
+#define dh_typecode_env dh_typecode_ptr
45
#define dh_typecode(t) dh_typecode_##t
46
47
#define dh_callflag_i32 0
48
--
49
2.34.1
50
51
diff view generated by jsdifflib
New patch
1
This finally paves the way for tcg/ to be built once per mode.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/tcg/tcg.h | 1 -
7
accel/tcg/plugin-gen.c | 1 +
8
tcg/region.c | 2 +-
9
tcg/tcg-op.c | 2 +-
10
tcg/tcg.c | 2 +-
11
5 files changed, 4 insertions(+), 4 deletions(-)
12
13
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg.h
16
+++ b/include/tcg/tcg.h
17
@@ -XXX,XX +XXX,XX @@
18
#ifndef TCG_H
19
#define TCG_H
20
21
-#include "cpu.h"
22
#include "exec/memop.h"
23
#include "exec/memopidx.h"
24
#include "qemu/bitops.h"
25
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
26
index XXXXXXX..XXXXXXX 100644
27
--- a/accel/tcg/plugin-gen.c
28
+++ b/accel/tcg/plugin-gen.c
29
@@ -XXX,XX +XXX,XX @@
30
* CPU's index into a TCG temp, since the first callback did it already.
31
*/
32
#include "qemu/osdep.h"
33
+#include "cpu.h"
34
#include "tcg/tcg.h"
35
#include "tcg/tcg-temp-internal.h"
36
#include "tcg/tcg-op.h"
37
diff --git a/tcg/region.c b/tcg/region.c
38
index XXXXXXX..XXXXXXX 100644
39
--- a/tcg/region.c
40
+++ b/tcg/region.c
41
@@ -XXX,XX +XXX,XX @@
42
#include "qemu/cacheinfo.h"
43
#include "qemu/qtree.h"
44
#include "qapi/error.h"
45
-#include "exec/exec-all.h"
46
#include "tcg/tcg.h"
47
+#include "exec/translation-block.h"
48
#include "tcg-internal.h"
49
50
51
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
52
index XXXXXXX..XXXXXXX 100644
53
--- a/tcg/tcg-op.c
54
+++ b/tcg/tcg-op.c
55
@@ -XXX,XX +XXX,XX @@
56
*/
57
58
#include "qemu/osdep.h"
59
-#include "exec/exec-all.h"
60
#include "tcg/tcg.h"
61
#include "tcg/tcg-temp-internal.h"
62
#include "tcg/tcg-op-common.h"
63
+#include "exec/translation-block.h"
64
#include "exec/plugin-gen.h"
65
#include "tcg-internal.h"
66
67
diff --git a/tcg/tcg.c b/tcg/tcg.c
68
index XXXXXXX..XXXXXXX 100644
69
--- a/tcg/tcg.c
70
+++ b/tcg/tcg.c
71
@@ -XXX,XX +XXX,XX @@
72
#include "qemu/cacheflush.h"
73
#include "qemu/cacheinfo.h"
74
#include "qemu/timer.h"
75
-#include "exec/exec-all.h"
76
+#include "exec/translation-block.h"
77
#include "exec/tlb-common.h"
78
#include "tcg/tcg-op-common.h"
79
80
--
81
2.34.1
82
83
diff view generated by jsdifflib
1
Use the pc coming from db->pc_first rather than the TB.
1
This function is only used in translator.c, and uses a
2
target-specific typedef: abi_ptr.
2
3
3
Use the cached host_addr rather than re-computing for the
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
first page. We still need a separate lookup for the second
5
page because it won't be computed for DisasContextBase until
6
the translator actually performs a read from the page.
7
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
6
---
11
include/exec/plugin-gen.h | 7 ++++---
7
include/exec/plugin-gen.h | 22 ----------------------
12
accel/tcg/plugin-gen.c | 22 +++++++++++-----------
8
accel/tcg/translator.c | 21 +++++++++++++++++++++
13
accel/tcg/translator.c | 2 +-
9
2 files changed, 21 insertions(+), 22 deletions(-)
14
3 files changed, 16 insertions(+), 15 deletions(-)
15
10
16
diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h
11
diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h
17
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
18
--- a/include/exec/plugin-gen.h
13
--- a/include/exec/plugin-gen.h
19
+++ b/include/exec/plugin-gen.h
14
+++ b/include/exec/plugin-gen.h
20
@@ -XXX,XX +XXX,XX @@ struct DisasContextBase;
15
@@ -XXX,XX +XXX,XX @@ void plugin_gen_insn_end(void);
21
16
void plugin_gen_disable_mem_helpers(void);
22
#ifdef CONFIG_PLUGIN
17
void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info);
23
18
24
-bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool supress);
19
-static inline void plugin_insn_append(abi_ptr pc, const void *from, size_t size)
25
+bool plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db,
20
-{
26
+ bool supress);
21
- struct qemu_plugin_insn *insn = tcg_ctx->plugin_insn;
27
void plugin_gen_tb_end(CPUState *cpu);
22
- abi_ptr off;
28
void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db);
23
-
29
void plugin_gen_insn_end(void);
24
- if (insn == NULL) {
30
@@ -XXX,XX +XXX,XX @@ static inline void plugin_insn_append(abi_ptr pc, const void *from, size_t size)
25
- return;
31
26
- }
27
- off = pc - insn->vaddr;
28
- if (off < insn->data->len) {
29
- g_byte_array_set_size(insn->data, off);
30
- } else if (off > insn->data->len) {
31
- /* we have an unexpected gap */
32
- g_assert_not_reached();
33
- }
34
-
35
- insn->data = g_byte_array_append(insn->data, from, size);
36
-}
37
-
32
#else /* !CONFIG_PLUGIN */
38
#else /* !CONFIG_PLUGIN */
33
39
34
-static inline
40
static inline bool
35
-bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool supress)
41
@@ -XXX,XX +XXX,XX @@ static inline void plugin_gen_disable_mem_helpers(void)
36
+static inline bool
42
static inline void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info)
37
+plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db, bool sup)
43
{ }
38
{
44
39
return false;
45
-static inline void plugin_insn_append(abi_ptr pc, const void *from, size_t size)
40
}
46
-{ }
41
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
47
-
42
index XXXXXXX..XXXXXXX 100644
48
#endif /* CONFIG_PLUGIN */
43
--- a/accel/tcg/plugin-gen.c
49
44
+++ b/accel/tcg/plugin-gen.c
50
#endif /* QEMU_PLUGIN_GEN_H */
45
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb)
46
pr_ops();
47
}
48
49
-bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_only)
50
+bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
51
+ bool mem_only)
52
{
53
bool ret = false;
54
55
@@ -XXX,XX +XXX,XX @@ bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_onl
56
57
ret = true;
58
59
- ptb->vaddr = tb->pc;
60
+ ptb->vaddr = db->pc_first;
61
ptb->vaddr2 = -1;
62
- get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1);
63
+ ptb->haddr1 = db->host_addr[0];
64
ptb->haddr2 = NULL;
65
ptb->mem_only = mem_only;
66
67
@@ -XXX,XX +XXX,XX @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
68
* Note that we skip this when haddr1 == NULL, e.g. when we're
69
* fetching instructions from a region not backed by RAM.
70
*/
71
- if (likely(ptb->haddr1 != NULL && ptb->vaddr2 == -1) &&
72
- unlikely((db->pc_next & TARGET_PAGE_MASK) !=
73
- (db->pc_first & TARGET_PAGE_MASK))) {
74
- get_page_addr_code_hostp(cpu->env_ptr, db->pc_next,
75
- &ptb->haddr2);
76
- ptb->vaddr2 = db->pc_next;
77
- }
78
- if (likely(ptb->vaddr2 == -1)) {
79
+ if (ptb->haddr1 == NULL) {
80
+ pinsn->haddr = NULL;
81
+ } else if (is_same_page(db, db->pc_next)) {
82
pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
83
} else {
84
+ if (ptb->vaddr2 == -1) {
85
+ ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
86
+ get_page_addr_code_hostp(cpu->env_ptr, ptb->vaddr2, &ptb->haddr2);
87
+ }
88
pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
89
}
90
}
91
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
51
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
92
index XXXXXXX..XXXXXXX 100644
52
index XXXXXXX..XXXXXXX 100644
93
--- a/accel/tcg/translator.c
53
--- a/accel/tcg/translator.c
94
+++ b/accel/tcg/translator.c
54
+++ b/accel/tcg/translator.c
95
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
55
@@ -XXX,XX +XXX,XX @@ static void *translator_access(CPUArchState *env, DisasContextBase *db,
96
ops->tb_start(db, cpu);
56
return host + (pc - base);
97
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
57
}
98
58
99
- plugin_enabled = plugin_gen_tb_start(cpu, tb, cflags & CF_MEMI_ONLY);
59
+static void plugin_insn_append(abi_ptr pc, const void *from, size_t size)
100
+ plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY);
60
+{
101
61
+#ifdef CONFIG_PLUGIN
102
while (true) {
62
+ struct qemu_plugin_insn *insn = tcg_ctx->plugin_insn;
103
db->num_insns++;
63
+ abi_ptr off;
64
+
65
+ if (insn == NULL) {
66
+ return;
67
+ }
68
+ off = pc - insn->vaddr;
69
+ if (off < insn->data->len) {
70
+ g_byte_array_set_size(insn->data, off);
71
+ } else if (off > insn->data->len) {
72
+ /* we have an unexpected gap */
73
+ g_assert_not_reached();
74
+ }
75
+
76
+ insn->data = g_byte_array_append(insn->data, from, size);
77
+#endif
78
+}
79
+
80
uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
81
{
82
uint8_t ret;
104
--
83
--
105
2.34.1
84
2.34.1
106
85
107
86
diff view generated by jsdifflib
New patch
1
Two headers are not required for the rest of the
2
contents of plugin-gen.h.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/exec/plugin-gen.h | 2 --
8
1 file changed, 2 deletions(-)
9
10
diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/include/exec/plugin-gen.h
13
+++ b/include/exec/plugin-gen.h
14
@@ -XXX,XX +XXX,XX @@
15
#ifndef QEMU_PLUGIN_GEN_H
16
#define QEMU_PLUGIN_GEN_H
17
18
-#include "exec/cpu_ldst.h"
19
-#include "qemu/plugin.h"
20
#include "tcg/tcg.h"
21
22
struct DisasContextBase;
23
--
24
2.34.1
25
26
diff view generated by jsdifflib
New patch
1
If CONFIG_USER_ONLY is ok generically, so is CONFIG_SOFTMMU,
2
because they are exactly opposite.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/exec/poison.h | 1 -
8
scripts/make-config-poison.sh | 5 +++--
9
2 files changed, 3 insertions(+), 3 deletions(-)
10
11
diff --git a/include/exec/poison.h b/include/exec/poison.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/include/exec/poison.h
14
+++ b/include/exec/poison.h
15
@@ -XXX,XX +XXX,XX @@
16
#pragma GCC poison CONFIG_HVF
17
#pragma GCC poison CONFIG_LINUX_USER
18
#pragma GCC poison CONFIG_KVM
19
-#pragma GCC poison CONFIG_SOFTMMU
20
#pragma GCC poison CONFIG_WHPX
21
#pragma GCC poison CONFIG_XEN
22
23
diff --git a/scripts/make-config-poison.sh b/scripts/make-config-poison.sh
24
index XXXXXXX..XXXXXXX 100755
25
--- a/scripts/make-config-poison.sh
26
+++ b/scripts/make-config-poison.sh
27
@@ -XXX,XX +XXX,XX @@ if test $# = 0; then
28
exit 0
29
fi
30
31
-# Create list of config switches that should be poisoned in common code...
32
-# but filter out CONFIG_TCG and CONFIG_USER_ONLY which are special.
33
+# Create list of config switches that should be poisoned in common code,
34
+# but filter out several which are handled manually.
35
exec sed -n \
36
-e' /CONFIG_TCG/d' \
37
-e '/CONFIG_USER_ONLY/d' \
38
+ -e '/CONFIG_SOFTMMU/d' \
39
-e '/^#define / {' \
40
-e 's///' \
41
-e 's/ .*//' \
42
--
43
2.34.1
44
45
diff view generated by jsdifflib
1
From: Alex Bennée <alex.bennee@linaro.org>
1
Create two static libraries for use by each execution mode.
2
2
3
This is a heavily used function so lets avoid the cost of
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
CPU_GET_CLASS. On the romulus-bmc run it has a modest effect:
5
6
Before: 36.812 s ± 0.506 s
7
After: 35.912 s ± 0.168 s
8
9
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-Id: <20220811151413.3350684-4-alex.bennee@linaro.org>
12
Signed-off-by: Cédric Le Goater <clg@kaod.org>
13
Message-Id: <20220923084803.498337-4-clg@kaod.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
---
5
---
16
hw/core/cpu-sysemu.c | 5 ++---
6
tcg/meson.build | 30 +++++++++++++++++++++++++++---
17
1 file changed, 2 insertions(+), 3 deletions(-)
7
1 file changed, 27 insertions(+), 3 deletions(-)
18
8
19
diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c
9
diff --git a/tcg/meson.build b/tcg/meson.build
20
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/core/cpu-sysemu.c
11
--- a/tcg/meson.build
22
+++ b/hw/core/cpu-sysemu.c
12
+++ b/tcg/meson.build
23
@@ -XXX,XX +XXX,XX @@ hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
13
@@ -XXX,XX +XXX,XX @@
24
14
+if not get_option('tcg').allowed()
25
int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
15
+ subdir_done()
26
{
16
+endif
27
- CPUClass *cc = CPU_GET_CLASS(cpu);
17
+
28
int ret = 0;
18
tcg_ss = ss.source_set()
29
19
30
- if (cc->sysemu_ops->asidx_from_attrs) {
20
tcg_ss.add(files(
31
- ret = cc->sysemu_ops->asidx_from_attrs(cpu, attrs);
21
@@ -XXX,XX +XXX,XX @@ tcg_ss.add(files(
32
+ if (cpu->cc->sysemu_ops->asidx_from_attrs) {
22
if get_option('tcg_interpreter')
33
+ ret = cpu->cc->sysemu_ops->asidx_from_attrs(cpu, attrs);
23
libffi = dependency('libffi', version: '>=3.0', required: true,
34
assert(ret < cpu->num_ases && ret >= 0);
24
method: 'pkg-config')
35
}
25
- specific_ss.add(libffi)
36
return ret;
26
- specific_ss.add(files('tci.c'))
27
+ tcg_ss.add(libffi)
28
+ tcg_ss.add(files('tci.c'))
29
endif
30
31
-specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
32
+tcg_ss = tcg_ss.apply(config_host, strict: false)
33
+
34
+libtcg_user = static_library('tcg_user',
35
+ tcg_ss.sources() + genh,
36
+ name_suffix: 'fa',
37
+ c_args: '-DCONFIG_USER_ONLY',
38
+ build_by_default: have_user)
39
+
40
+tcg_user = declare_dependency(link_with: libtcg_user,
41
+ dependencies: tcg_ss.dependencies())
42
+user_ss.add(tcg_user)
43
+
44
+libtcg_softmmu = static_library('tcg_softmmu',
45
+ tcg_ss.sources() + genh,
46
+ name_suffix: 'fa',
47
+ c_args: '-DCONFIG_SOFTMMU',
48
+ build_by_default: have_system)
49
+
50
+tcg_softmmu = declare_dependency(link_with: libtcg_softmmu,
51
+ dependencies: tcg_ss.dependencies())
52
+softmmu_ss.add(tcg_softmmu)
37
--
53
--
38
2.34.1
54
2.34.1
39
55
40
56
diff view generated by jsdifflib
New patch
1
From: Ilya Leoshkevich <iii@linux.ibm.com>
1
2
3
Coverity complains that perf_marker is never unmapped.
4
Fix by unmapping it in perf_exit().
5
6
Fixes: Coverity CID 1507929
7
Fixes: 5584e2dbe8c9 ("tcg: add perfmap and jitdump")
8
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
9
Message-Id: <20230605114134.1169974-1-iii@linux.ibm.com>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
13
accel/tcg/perf.c | 11 +++++++++--
14
1 file changed, 9 insertions(+), 2 deletions(-)
15
16
diff --git a/accel/tcg/perf.c b/accel/tcg/perf.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/accel/tcg/perf.c
19
+++ b/accel/tcg/perf.c
20
@@ -XXX,XX +XXX,XX @@ static void write_perfmap_entry(const void *start, size_t insn,
21
}
22
23
static FILE *jitdump;
24
+static size_t perf_marker_size;
25
+static void *perf_marker = MAP_FAILED;
26
27
#define JITHEADER_MAGIC 0x4A695444
28
#define JITHEADER_VERSION 1
29
@@ -XXX,XX +XXX,XX @@ void perf_enable_jitdump(void)
30
{
31
struct jitheader header;
32
char jitdump_file[32];
33
- void *perf_marker;
34
35
if (!use_rt_clock) {
36
warn_report("CLOCK_MONOTONIC is not available, proceeding without jitdump");
37
@@ -XXX,XX +XXX,XX @@ void perf_enable_jitdump(void)
38
* PERF_RECORD_MMAP or PERF_RECORD_MMAP2 event is of the form jit-%d.dump
39
* and will process it as a jitdump file.
40
*/
41
- perf_marker = mmap(NULL, qemu_real_host_page_size(), PROT_READ | PROT_EXEC,
42
+ perf_marker_size = qemu_real_host_page_size();
43
+ perf_marker = mmap(NULL, perf_marker_size, PROT_READ | PROT_EXEC,
44
MAP_PRIVATE, fileno(jitdump), 0);
45
if (perf_marker == MAP_FAILED) {
46
warn_report("Could not map %s: %s, proceeding without jitdump",
47
@@ -XXX,XX +XXX,XX @@ void perf_exit(void)
48
perfmap = NULL;
49
}
50
51
+ if (perf_marker != MAP_FAILED) {
52
+ munmap(perf_marker, perf_marker_size);
53
+ perf_marker = MAP_FAILED;
54
+ }
55
+
56
if (jitdump) {
57
fclose(jitdump);
58
jitdump = NULL;
59
--
60
2.34.1
diff view generated by jsdifflib
1
From: Alex Bennée <alex.bennee@linaro.org>
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
2
2
3
The class cast checkers are quite expensive and always on (unlike the
3
In commit d56fea79f9 ("tcg: Move TCG_{LOW,HIGH} to tcg-internal.h")
4
dynamic case who's checks are gated by CONFIG_QOM_CAST_DEBUG). To
4
we replaced the "_link_error" definitions with modern QEMU_ERROR()
5
avoid the overhead of repeatedly checking something which should never
5
attribute markup. We covered tcg-op.c but forgot to completely
6
change we cache the CPUClass reference for use in the hot code paths.
6
clean tcg-op-vec.c. Do it now.
7
7
8
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
8
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Message-Id: <20230605175647.88395-3-philmd@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-Id: <20220811151413.3350684-3-alex.bennee@linaro.org>
11
Signed-off-by: Cédric Le Goater <clg@kaod.org>
12
Message-Id: <20220923084803.498337-3-clg@kaod.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
12
---
15
include/hw/core/cpu.h | 9 +++++++++
13
tcg/tcg-op-vec.c | 11 -----------
16
cpu.c | 9 ++++-----
14
1 file changed, 11 deletions(-)
17
2 files changed, 13 insertions(+), 5 deletions(-)
18
15
19
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
16
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
20
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
21
--- a/include/hw/core/cpu.h
18
--- a/tcg/tcg-op-vec.c
22
+++ b/include/hw/core/cpu.h
19
+++ b/tcg/tcg-op-vec.c
23
@@ -XXX,XX +XXX,XX @@ typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
20
@@ -XXX,XX +XXX,XX @@
24
*/
21
#include "tcg/tcg-mo.h"
25
#define CPU(obj) ((CPUState *)(obj))
22
#include "tcg-internal.h"
26
23
27
+/*
24
-
28
+ * The class checkers bring in CPU_GET_CLASS() which is potentially
25
-/* Reduce the number of ifdefs below. This assumes that all uses of
29
+ * expensive given the eventual call to
26
- TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
30
+ * object_class_dynamic_cast_assert(). Because of this the CPUState
27
- the compiler can eliminate. */
31
+ * has a cached value for the class in cs->cc which is set up in
28
-#if TCG_TARGET_REG_BITS == 64
32
+ * cpu_exec_realizefn() for use in hot code paths.
29
-extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64);
33
+ */
30
-extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
34
typedef struct CPUClass CPUClass;
31
-#define TCGV_LOW TCGV_LOW_link_error
35
DECLARE_CLASS_CHECKERS(CPUClass, CPU,
32
-#define TCGV_HIGH TCGV_HIGH_link_error
36
TYPE_CPU)
37
@@ -XXX,XX +XXX,XX @@ struct qemu_work_item;
38
struct CPUState {
39
/*< private >*/
40
DeviceState parent_obj;
41
+ /* cache to avoid expensive CPU_GET_CLASS */
42
+ CPUClass *cc;
43
/*< public >*/
44
45
int nr_cores;
46
diff --git a/cpu.c b/cpu.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/cpu.c
49
+++ b/cpu.c
50
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_cpu_common = {
51
52
void cpu_exec_realizefn(CPUState *cpu, Error **errp)
53
{
54
-#ifndef CONFIG_USER_ONLY
55
- CPUClass *cc = CPU_GET_CLASS(cpu);
56
-#endif
33
-#endif
57
+ /* cache the cpu class for the hotpath */
34
-
58
+ cpu->cc = CPU_GET_CLASS(cpu);
35
/*
59
36
* Vector optional opcode tracking.
60
cpu_list_add(cpu);
37
* Except for the basic logical operations (and, or, xor), and
61
if (!accel_cpu_realizefn(cpu, errp)) {
62
@@ -XXX,XX +XXX,XX @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp)
63
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
64
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
65
}
66
- if (cc->sysemu_ops->legacy_vmsd != NULL) {
67
- vmstate_register(NULL, cpu->cpu_index, cc->sysemu_ops->legacy_vmsd, cpu);
68
+ if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
69
+ vmstate_register(NULL, cpu->cpu_index, cpu->cc->sysemu_ops->legacy_vmsd, cpu);
70
}
71
#endif /* CONFIG_USER_ONLY */
72
}
73
--
38
--
74
2.34.1
39
2.34.1
75
40
76
41
diff view generated by jsdifflib