1
TCG patch queue, plus one target/sh4 patch that
1
Note that I have refreshed the expiry of my public key.
2
Yoshinori Sato asked me to process.
2
and pushed to keys.openpgp.org.
3
3
4
4
5
r~
5
r~
6
6
7
7
8
The following changes since commit efbf38d73e5dcc4d5f8b98c6e7a12be1f3b91745:
8
The following changes since commit 4d5d933bbc7cc52f6cc6b9021f91fa06266222d5:
9
9
10
Merge tag 'for-upstream' of git://repo.or.cz/qemu/kevin into staging (2022-10-03 15:06:07 -0400)
10
Merge tag 'pull-xenfv-20250116' of git://git.infradead.org/users/dwmw2/qemu into staging (2025-01-16 09:03:43 -0500)
11
11
12
are available in the Git repository at:
12
are available in the Git repository at:
13
13
14
https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20221004
14
https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20250117
15
15
16
for you to fetch changes up to ab419fd8a035a65942de4e63effcd55ccbf1a9fe:
16
for you to fetch changes up to db1649823d4f27b924a5aa5f9e0111457accb798:
17
17
18
target/sh4: Fix TB_FLAG_UNALIGN (2022-10-04 12:33:05 -0700)
18
softfloat: Constify helpers returning float_status field (2025-01-17 08:29:25 -0800)
19
19
20
----------------------------------------------------------------
20
----------------------------------------------------------------
21
Cache CPUClass for use in hot code paths.
21
tcg:
22
Add CPUTLBEntryFull, probe_access_full, tlb_set_page_full.
22
- Add TCGOP_TYPE, TCGOP_FLAGS.
23
Add generic support for TARGET_TB_PCREL.
23
- Pass type and flags to tcg_op_supported, tcg_target_op_def.
24
tcg/ppc: Optimize 26-bit jumps using STQ for POWER 2.07
24
- Split out tcg-target-has.h and unexport from tcg.h.
25
target/sh4: Fix TB_FLAG_UNALIGN
25
- Reorg constraint processing; constify TCGOpDef.
26
- Make extract, sextract, deposit opcodes mandatory.
27
- Merge ext{8,16,32}{s,u} opcodes into {s}extract.
28
tcg/mips: Expand bswap unconditionally
29
tcg/riscv: Use SRAIW, SRLIW for {s}extract_i64
30
tcg/riscv: Use BEXTI for single-bit extractions
31
tcg/sparc64: Use SRA, SRL for {s}extract_i64
32
33
disas/riscv: Guard dec->cfg dereference for host disassemble
34
util/cpuinfo-riscv: Detect Zbs
35
accel/tcg: Call tcg_tb_insert() for one-insn TBs
36
linux-user: Add missing /proc/cpuinfo fields for sparc
26
37
27
----------------------------------------------------------------
38
----------------------------------------------------------------
28
Alex Bennée (3):
39
Helge Deller (1):
29
cpu: cache CPUClass in CPUState for hot code paths
40
linux-user: Add missing /proc/cpuinfo fields for sparc
30
hw/core/cpu-sysemu: used cached class in cpu_asidx_from_attrs
41
31
cputlb: used cached CPUClass in our hot-paths
42
Ilya Leoshkevich (2):
32
43
tcg: Document tb_lookup() and tcg_tb_lookup()
33
Leandro Lupori (1):
44
accel/tcg: Call tcg_tb_insert() for one-insn TBs
34
tcg/ppc: Optimize 26-bit jumps
45
35
46
LIU Zhiwei (1):
36
Richard Henderson (16):
47
disas/riscv: Guard dec->cfg dereference for host disassemble
37
accel/tcg: Rename CPUIOTLBEntry to CPUTLBEntryFull
48
38
accel/tcg: Drop addr member from SavedIOTLB
49
Philippe Mathieu-Daudé (1):
39
accel/tcg: Suppress auto-invalidate in probe_access_internal
50
softfloat: Constify helpers returning float_status field
40
accel/tcg: Introduce probe_access_full
51
41
accel/tcg: Introduce tlb_set_page_full
52
Richard Henderson (63):
42
include/exec: Introduce TARGET_PAGE_ENTRY_EXTRA
53
tcg: Move call abi parameters from tcg-target.h to tcg-target.c.inc
43
accel/tcg: Remove PageDesc code_bitmap
54
tcg: Replace TCGOP_VECL with TCGOP_TYPE
44
accel/tcg: Use bool for page_find_alloc
55
tcg: Move tcg_op_insert_{after,before} decls to tcg-internal.h
45
accel/tcg: Use DisasContextBase in plugin_gen_tb_start
56
tcg: Copy TCGOP_TYPE in tcg_op_insert_{after,before}
46
accel/tcg: Do not align tb->page_addr[0]
57
tcg: Add TCGOP_FLAGS
47
accel/tcg: Inline tb_flush_jmp_cache
58
tcg: Add type and flags arguments to tcg_op_supported
48
include/hw/core: Create struct CPUJumpCache
59
target/arm: Do not test TCG_TARGET_HAS_bitsel_vec
49
hw/core: Add CPUClass.get_pc
60
target/arm: Use tcg_op_supported
50
accel/tcg: Introduce tb_pc and log_pc
61
target/tricore: Use tcg_op_supported
51
accel/tcg: Introduce TARGET_TB_PCREL
62
tcg: Add tcg_op_deposit_valid
52
target/sh4: Fix TB_FLAG_UNALIGN
63
target/i386: Remove TCG_TARGET_extract_tl_valid
53
64
target/i386: Use tcg_op_deposit_valid
54
accel/tcg/internal.h | 10 ++
65
target/i386: Use tcg_op_supported
55
accel/tcg/tb-hash.h | 1 +
66
tcg: Remove TCG_TARGET_NEED_LDST_LABELS and TCG_TARGET_NEED_POOL_LABELS
56
accel/tcg/tb-jmp-cache.h | 65 ++++++++
67
tcg: Rename tcg-target.opc.h to tcg-target-opc.h.inc
57
include/exec/cpu-common.h | 1 +
68
tcg/tci: Move TCI specific opcodes to tcg-target-opc.h.inc
58
include/exec/cpu-defs.h | 48 ++++--
69
tcg: Move fallback tcg_can_emit_vec_op out of line
59
include/exec/exec-all.h | 75 ++++++++-
70
tcg/ppc: Remove TCGPowerISA enum
60
include/exec/plugin-gen.h | 7 +-
71
tcg: Extract default TCG_TARGET_HAS_foo definitions to 'tcg-has.h'
61
include/hw/core/cpu.h | 28 ++--
72
tcg/aarch64: Extract TCG_TARGET_HAS_foo defs to 'tcg-target-has.h'
62
include/qemu/typedefs.h | 2 +
73
tcg/arm: Extract TCG_TARGET_HAS_foo defs to 'tcg-target-has.h'
63
include/tcg/tcg.h | 2 +-
74
tcg/i386: Extract TCG_TARGET_HAS_foo defs to 'tcg-target-has.h'
64
target/sh4/cpu.h | 56 ++++---
75
tcg/loongarch64: Extract TCG_TARGET_HAS_foo defs to 'tcg-target-has.h'
65
accel/stubs/tcg-stub.c | 4 +
76
tcg/mips: Extract TCG_TARGET_HAS_foo defs to 'tcg-target-has.h'
66
accel/tcg/cpu-exec.c | 80 +++++-----
77
tcg/ppc: Extract TCG_TARGET_HAS_foo defs to 'tcg-target-has.h'
67
accel/tcg/cputlb.c | 259 ++++++++++++++++++--------------
78
tcg/riscv: Extract TCG_TARGET_HAS_foo defs to 'tcg-target-has.h'
68
accel/tcg/plugin-gen.c | 22 +--
79
tcg/s390x: Extract TCG_TARGET_HAS_foo defs to 'tcg-target-has.h'
69
accel/tcg/translate-all.c | 214 ++++++++++++--------------
80
tcg/sparc64: Extract TCG_TARGET_HAS_foo defs to 'tcg-target-has.h'
70
accel/tcg/translator.c | 2 +-
81
tcg/tci: Extract TCG_TARGET_HAS_foo defs to 'tcg-target-has.h'
71
cpu.c | 9 +-
82
tcg: Include 'tcg-target-has.h' once in 'tcg-has.h'
72
hw/core/cpu-common.c | 3 +-
83
tcg: Only include 'tcg-has.h' when necessary
73
hw/core/cpu-sysemu.c | 5 +-
84
tcg: Split out tcg-target-mo.h
74
linux-user/sh4/signal.c | 6 +-
85
tcg: Use C_NotImplemented in tcg_target_op_def
75
plugins/core.c | 2 +-
86
tcg: Change have_vec to has_type in tcg_op_supported
76
target/alpha/cpu.c | 9 ++
87
tcg: Reorg process_op_defs
77
target/arm/cpu.c | 17 ++-
88
tcg: Remove args_ct from TCGOpDef
78
target/arm/mte_helper.c | 14 +-
89
tcg: Constify tcg_op_defs
79
target/arm/sve_helper.c | 4 +-
90
tcg: Validate op supported in opcode_args_ct
80
target/arm/translate-a64.c | 2 +-
91
tcg: Add TCG_OPF_NOT_PRESENT to opcodes without inputs or outputs
81
target/avr/cpu.c | 10 +-
92
tcg: Pass type and flags to tcg_target_op_def
82
target/cris/cpu.c | 8 +
93
tcg: Add TCGType argument to tcg_out_op
83
target/hexagon/cpu.c | 10 +-
94
tcg: Remove TCG_OPF_64BIT
84
target/hppa/cpu.c | 12 +-
95
tcg: Drop implementation checks from tcg-opc.h
85
target/i386/cpu.c | 9 ++
96
tcg: Replace IMPLVEC with TCG_OPF_VECTOR
86
target/i386/tcg/tcg-cpu.c | 2 +-
97
tcg/mips: Expand bswap unconditionally
87
target/loongarch/cpu.c | 11 +-
98
tcg/i386: Handle all 8-bit extensions for i686
88
target/m68k/cpu.c | 8 +
99
tcg/i386: Fold the ext{8,16,32}[us] cases into {s}extract
89
target/microblaze/cpu.c | 10 +-
100
tcg/aarch64: Provide TCG_TARGET_{s}extract_valid
90
target/mips/cpu.c | 8 +
101
tcg/aarch64: Expand extract with offset 0 with andi
91
target/mips/tcg/exception.c | 2 +-
102
tcg/arm: Add full [US]XT[BH] into {s}extract
92
target/mips/tcg/sysemu/special_helper.c | 2 +-
103
tcg/loongarch64: Fold the ext{8,16,32}[us] cases into {s}extract
93
target/nios2/cpu.c | 9 ++
104
tcg/mips: Fold the ext{8,16,32}[us] cases into {s}extract
94
target/openrisc/cpu.c | 10 +-
105
tcg/ppc: Fold the ext{8,16,32}[us] cases into {s}extract
95
target/ppc/cpu_init.c | 8 +
106
tcg/riscv64: Fold the ext{8,16,32}[us] cases into {s}extract
96
target/riscv/cpu.c | 17 ++-
107
tcg/riscv: Use SRAIW, SRLIW for {s}extract_i64
97
target/rx/cpu.c | 10 +-
108
tcg/s390x: Fold the ext{8,16,32}[us] cases into {s}extract
98
target/s390x/cpu.c | 8 +
109
tcg/sparc64: Use SRA, SRL for {s}extract_i64
99
target/s390x/tcg/mem_helper.c | 4 -
110
tcg/tci: Provide TCG_TARGET_{s}extract_valid
100
target/sh4/cpu.c | 18 ++-
111
tcg/tci: Remove assertions for deposit and extract
101
target/sh4/helper.c | 6 +-
112
tcg: Remove TCG_TARGET_HAS_{s}extract_{i32,i64}
102
target/sh4/translate.c | 90 +++++------
113
tcg: Remove TCG_TARGET_HAS_deposit_{i32,i64}
103
target/sparc/cpu.c | 10 +-
114
util/cpuinfo-riscv: Detect Zbs
104
target/tricore/cpu.c | 11 +-
115
tcg/riscv: Use BEXTI for single-bit extractions
105
target/xtensa/cpu.c | 8 +
116
106
tcg/tcg.c | 8 +-
117
accel/tcg/internal-target.h | 1 +
107
trace/control-target.c | 2 +-
118
host/include/riscv/host/cpuinfo.h | 5 +-
108
tcg/ppc/tcg-target.c.inc | 119 +++++++++++----
119
include/fpu/softfloat-helpers.h | 25 +-
109
55 files changed, 915 insertions(+), 462 deletions(-)
120
include/tcg/tcg-opc.h | 355 +++++-------
110
create mode 100644 accel/tcg/tb-jmp-cache.h
121
include/tcg/tcg.h | 187 ++----
111
122
linux-user/sparc/target_proc.h | 20 +-
123
tcg/aarch64/tcg-target-has.h | 117 ++++
124
tcg/aarch64/tcg-target-mo.h | 12 +
125
tcg/aarch64/tcg-target.h | 126 ----
126
tcg/arm/tcg-target-has.h | 100 ++++
127
tcg/arm/tcg-target-mo.h | 13 +
128
tcg/arm/tcg-target.h | 86 ---
129
tcg/i386/tcg-target-has.h | 169 ++++++
130
tcg/i386/tcg-target-mo.h | 19 +
131
tcg/i386/tcg-target.h | 162 ------
132
tcg/loongarch64/tcg-target-has.h | 119 ++++
133
tcg/loongarch64/tcg-target-mo.h | 12 +
134
tcg/loongarch64/tcg-target.h | 115 ----
135
tcg/mips/tcg-target-has.h | 135 +++++
136
tcg/mips/tcg-target-mo.h | 13 +
137
tcg/mips/tcg-target.h | 130 -----
138
tcg/ppc/tcg-target-has.h | 131 +++++
139
tcg/ppc/tcg-target-mo.h | 12 +
140
tcg/ppc/tcg-target.h | 126 ----
141
tcg/riscv/tcg-target-has.h | 135 +++++
142
tcg/riscv/tcg-target-mo.h | 12 +
143
tcg/riscv/tcg-target.h | 116 ----
144
tcg/s390x/tcg-target-has.h | 137 +++++
145
tcg/s390x/tcg-target-mo.h | 12 +
146
tcg/s390x/tcg-target.h | 126 ----
147
tcg/sparc64/tcg-target-has.h | 87 +++
148
tcg/sparc64/tcg-target-mo.h | 12 +
149
tcg/sparc64/tcg-target.h | 91 ---
150
tcg/tcg-has.h | 101 ++++
151
tcg/tcg-internal.h | 18 +-
152
tcg/tci/tcg-target-has.h | 81 +++
153
tcg/tci/tcg-target-mo.h | 17 +
154
tcg/tci/tcg-target.h | 94 ---
155
accel/tcg/cpu-exec.c | 15 +-
156
accel/tcg/translate-all.c | 29 +-
157
disas/riscv.c | 23 +-
158
target/arm/tcg/translate-a64.c | 10 +-
159
target/arm/tcg/translate-sve.c | 22 +-
160
target/arm/tcg/translate.c | 2 +-
161
target/tricore/translate.c | 4 +-
162
tcg/optimize.c | 27 +-
163
tcg/tcg-common.c | 5 +-
164
tcg/tcg-op-gvec.c | 1 +
165
tcg/tcg-op-ldst.c | 29 +-
166
tcg/tcg-op-vec.c | 9 +-
167
tcg/tcg-op.c | 149 ++---
168
tcg/tcg.c | 643 ++++++++++++++-------
169
tcg/tci.c | 13 +-
170
util/cpuinfo-riscv.c | 18 +-
171
docs/devel/tcg-ops.rst | 15 +-
172
target/i386/tcg/emit.c.inc | 14 +-
173
.../{tcg-target.opc.h => tcg-target-opc.h.inc} | 4 +-
174
tcg/aarch64/tcg-target.c.inc | 33 +-
175
tcg/arm/{tcg-target.opc.h => tcg-target-opc.h.inc} | 6 +-
176
tcg/arm/tcg-target.c.inc | 71 ++-
177
.../{tcg-target.opc.h => tcg-target-opc.h.inc} | 22 +-
178
tcg/i386/tcg-target.c.inc | 121 +++-
179
.../{tcg-target.opc.h => tcg-target-opc.h.inc} | 0
180
tcg/loongarch64/tcg-target.c.inc | 59 +-
181
tcg/mips/tcg-target-opc.h.inc | 1 +
182
tcg/mips/tcg-target.c.inc | 55 +-
183
tcg/ppc/{tcg-target.opc.h => tcg-target-opc.h.inc} | 12 +-
184
tcg/ppc/tcg-target.c.inc | 39 +-
185
.../{tcg-target.opc.h => tcg-target-opc.h.inc} | 0
186
tcg/riscv/tcg-target.c.inc | 66 ++-
187
.../{tcg-target.opc.h => tcg-target-opc.h.inc} | 6 +-
188
tcg/s390x/tcg-target.c.inc | 59 +-
189
tcg/sparc64/tcg-target-opc.h.inc | 1 +
190
tcg/sparc64/tcg-target.c.inc | 29 +-
191
tcg/tcg-ldst.c.inc | 65 ---
192
tcg/tcg-pool.c.inc | 162 ------
193
tcg/tci/tcg-target-opc.h.inc | 4 +
194
tcg/tci/tcg-target.c.inc | 53 +-
195
78 files changed, 2856 insertions(+), 2269 deletions(-)
196
create mode 100644 tcg/aarch64/tcg-target-has.h
197
create mode 100644 tcg/aarch64/tcg-target-mo.h
198
create mode 100644 tcg/arm/tcg-target-has.h
199
create mode 100644 tcg/arm/tcg-target-mo.h
200
create mode 100644 tcg/i386/tcg-target-has.h
201
create mode 100644 tcg/i386/tcg-target-mo.h
202
create mode 100644 tcg/loongarch64/tcg-target-has.h
203
create mode 100644 tcg/loongarch64/tcg-target-mo.h
204
create mode 100644 tcg/mips/tcg-target-has.h
205
create mode 100644 tcg/mips/tcg-target-mo.h
206
create mode 100644 tcg/ppc/tcg-target-has.h
207
create mode 100644 tcg/ppc/tcg-target-mo.h
208
create mode 100644 tcg/riscv/tcg-target-has.h
209
create mode 100644 tcg/riscv/tcg-target-mo.h
210
create mode 100644 tcg/s390x/tcg-target-has.h
211
create mode 100644 tcg/s390x/tcg-target-mo.h
212
create mode 100644 tcg/sparc64/tcg-target-has.h
213
create mode 100644 tcg/sparc64/tcg-target-mo.h
214
create mode 100644 tcg/tcg-has.h
215
create mode 100644 tcg/tci/tcg-target-has.h
216
create mode 100644 tcg/tci/tcg-target-mo.h
217
rename tcg/aarch64/{tcg-target.opc.h => tcg-target-opc.h.inc} (82%)
218
rename tcg/arm/{tcg-target.opc.h => tcg-target-opc.h.inc} (75%)
219
rename tcg/i386/{tcg-target.opc.h => tcg-target-opc.h.inc} (72%)
220
rename tcg/loongarch64/{tcg-target.opc.h => tcg-target-opc.h.inc} (100%)
221
create mode 100644 tcg/mips/tcg-target-opc.h.inc
222
rename tcg/ppc/{tcg-target.opc.h => tcg-target-opc.h.inc} (83%)
223
rename tcg/riscv/{tcg-target.opc.h => tcg-target-opc.h.inc} (100%)
224
rename tcg/s390x/{tcg-target.opc.h => tcg-target-opc.h.inc} (75%)
225
create mode 100644 tcg/sparc64/tcg-target-opc.h.inc
226
delete mode 100644 tcg/tcg-ldst.c.inc
227
delete mode 100644 tcg/tcg-pool.c.inc
228
create mode 100644 tcg/tci/tcg-target-opc.h.inc
229
diff view generated by jsdifflib
New patch
1
From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
1
2
3
For riscv host, it will set dec->cfg to zero. Thus we should guard
4
the dec->cfg dereference for riscv host disassemble.
5
6
And in general, we should only use dec->cfg for target in three cases:
7
8
1) For not incompatible encodings, such as zcmp/zcmt/zfinx.
9
2) For maybe-ops encodings, they are better to be disassembled to
10
the "real" extensions, such as zicfiss. The guard of dec->zimop
11
and dec->zcmop is for comment and avoid check for every extension
12
that encoded in maybe-ops area.
13
3) For custom encodings, we have to use dec->cfg to disassemble
14
custom encodings using the same encoding area.
15
16
Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
17
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
18
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
19
Message-ID: <20241206032411.52528-1-zhiwei_liu@linux.alibaba.com>
20
---
21
disas/riscv.c | 23 ++++++++++++-----------
22
1 file changed, 12 insertions(+), 11 deletions(-)
23
24
diff --git a/disas/riscv.c b/disas/riscv.c
25
index XXXXXXX..XXXXXXX 100644
26
--- a/disas/riscv.c
27
+++ b/disas/riscv.c
28
@@ -XXX,XX +XXX,XX @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
29
break;
30
case 2: op = rv_op_c_li; break;
31
case 3:
32
- if (dec->cfg->ext_zcmop) {
33
+ if (dec->cfg && dec->cfg->ext_zcmop) {
34
if ((((inst >> 2) & 0b111111) == 0b100000) &&
35
(((inst >> 11) & 0b11) == 0b0)) {
36
unsigned int cmop_code = 0;
37
@@ -XXX,XX +XXX,XX @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
38
op = rv_op_c_sqsp;
39
} else {
40
op = rv_op_c_fsdsp;
41
- if (dec->cfg->ext_zcmp && ((inst >> 12) & 0b01)) {
42
+ if (dec->cfg && dec->cfg->ext_zcmp && ((inst >> 12) & 0b01)) {
43
switch ((inst >> 8) & 0b01111) {
44
case 8:
45
if (((inst >> 4) & 0b01111) >= 4) {
46
@@ -XXX,XX +XXX,XX @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
47
} else {
48
switch ((inst >> 10) & 0b011) {
49
case 0:
50
- if (!dec->cfg->ext_zcmt) {
51
+ if (dec->cfg && !dec->cfg->ext_zcmt) {
52
break;
53
}
54
if (((inst >> 2) & 0xFF) >= 32) {
55
@@ -XXX,XX +XXX,XX @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
56
}
57
break;
58
case 3:
59
- if (!dec->cfg->ext_zcmp) {
60
+ if (dec->cfg && !dec->cfg->ext_zcmp) {
61
break;
62
}
63
switch ((inst >> 5) & 0b011) {
64
@@ -XXX,XX +XXX,XX @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
65
break;
66
case 5:
67
op = rv_op_auipc;
68
- if (dec->cfg->ext_zicfilp &&
69
+ if (dec->cfg && dec->cfg->ext_zicfilp &&
70
(((inst >> 7) & 0b11111) == 0b00000)) {
71
op = rv_op_lpad;
72
}
73
@@ -XXX,XX +XXX,XX @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa)
74
case 2: op = rv_op_csrrs; break;
75
case 3: op = rv_op_csrrc; break;
76
case 4:
77
- if (dec->cfg->ext_zimop) {
78
+ if (dec->cfg && dec->cfg->ext_zimop) {
79
int imm_mop5, imm_mop3, reg_num;
80
if ((extract32(inst, 22, 10) & 0b1011001111)
81
== 0b1000000111) {
82
@@ -XXX,XX +XXX,XX @@ static GString *format_inst(size_t tab, rv_decode *dec)
83
g_string_append(buf, rv_ireg_name_sym[dec->rs2]);
84
break;
85
case '3':
86
- if (dec->cfg->ext_zfinx) {
87
+ if (dec->cfg && dec->cfg->ext_zfinx) {
88
g_string_append(buf, rv_ireg_name_sym[dec->rd]);
89
} else {
90
g_string_append(buf, rv_freg_name_sym[dec->rd]);
91
}
92
break;
93
case '4':
94
- if (dec->cfg->ext_zfinx) {
95
+ if (dec->cfg && dec->cfg->ext_zfinx) {
96
g_string_append(buf, rv_ireg_name_sym[dec->rs1]);
97
} else {
98
g_string_append(buf, rv_freg_name_sym[dec->rs1]);
99
}
100
break;
101
case '5':
102
- if (dec->cfg->ext_zfinx) {
103
+ if (dec->cfg && dec->cfg->ext_zfinx) {
104
g_string_append(buf, rv_ireg_name_sym[dec->rs2]);
105
} else {
106
g_string_append(buf, rv_freg_name_sym[dec->rs2]);
107
}
108
break;
109
case '6':
110
- if (dec->cfg->ext_zfinx) {
111
+ if (dec->cfg && dec->cfg->ext_zfinx) {
112
g_string_append(buf, rv_ireg_name_sym[dec->rs3]);
113
} else {
114
g_string_append(buf, rv_freg_name_sym[dec->rs3]);
115
@@ -XXX,XX +XXX,XX @@ static GString *disasm_inst(rv_isa isa, uint64_t pc, rv_inst inst,
116
const rv_opcode_data *opcode_data = decoders[i].opcode_data;
117
void (*decode_func)(rv_decode *, rv_isa) = decoders[i].decode_func;
118
119
- if (guard_func(cfg)) {
120
+ /* always_true_p don't dereference cfg */
121
+ if (((i == 0) || cfg) && guard_func(cfg)) {
122
dec.opcode_data = opcode_data;
123
decode_func(&dec, isa);
124
if (dec.op != rv_op_illegal)
125
--
126
2.43.0
diff view generated by jsdifflib
New patch
1
These defines are not required outside of tcg/tcg.c,
2
which includes tcg-target.c.inc before use.
3
Reduces the exported symbol set of tcg-target.h.
1
4
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/aarch64/tcg-target.h | 13 -------------
9
tcg/arm/tcg-target.h | 8 --------
10
tcg/i386/tcg-target.h | 20 --------------------
11
tcg/loongarch64/tcg-target.h | 9 ---------
12
tcg/mips/tcg-target.h | 14 --------------
13
tcg/riscv/tcg-target.h | 9 ---------
14
tcg/s390x/tcg-target.h | 8 --------
15
tcg/sparc64/tcg-target.h | 11 -----------
16
tcg/tci/tcg-target.h | 14 --------------
17
tcg/aarch64/tcg-target.c.inc | 13 +++++++++++++
18
tcg/arm/tcg-target.c.inc | 8 ++++++++
19
tcg/i386/tcg-target.c.inc | 20 ++++++++++++++++++++
20
tcg/loongarch64/tcg-target.c.inc | 9 +++++++++
21
tcg/mips/tcg-target.c.inc | 14 ++++++++++++++
22
tcg/riscv/tcg-target.c.inc | 9 +++++++++
23
tcg/s390x/tcg-target.c.inc | 8 ++++++++
24
tcg/sparc64/tcg-target.c.inc | 10 ++++++++++
25
tcg/tci/tcg-target.c.inc | 14 ++++++++++++++
26
18 files changed, 105 insertions(+), 106 deletions(-)
27
28
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
29
index XXXXXXX..XXXXXXX 100644
30
--- a/tcg/aarch64/tcg-target.h
31
+++ b/tcg/aarch64/tcg-target.h
32
@@ -XXX,XX +XXX,XX @@ typedef enum {
33
34
#define TCG_TARGET_NB_REGS 64
35
36
-/* used for function call generation */
37
-#define TCG_REG_CALL_STACK TCG_REG_SP
38
-#define TCG_TARGET_STACK_ALIGN 16
39
-#define TCG_TARGET_CALL_STACK_OFFSET 0
40
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
41
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
42
-#ifdef CONFIG_DARWIN
43
-# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
44
-#else
45
-# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
46
-#endif
47
-#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
48
-
49
#define have_lse (cpuinfo & CPUINFO_LSE)
50
#define have_lse2 (cpuinfo & CPUINFO_LSE2)
51
52
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
53
index XXXXXXX..XXXXXXX 100644
54
--- a/tcg/arm/tcg-target.h
55
+++ b/tcg/arm/tcg-target.h
56
@@ -XXX,XX +XXX,XX @@ extern bool use_idiv_instructions;
57
extern bool use_neon_instructions;
58
#endif
59
60
-/* used for function call generation */
61
-#define TCG_TARGET_STACK_ALIGN        8
62
-#define TCG_TARGET_CALL_STACK_OFFSET    0
63
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
64
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
65
-#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
66
-#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
67
-
68
/* optional instructions */
69
#define TCG_TARGET_HAS_ext8s_i32 1
70
#define TCG_TARGET_HAS_ext16s_i32 1
71
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
72
index XXXXXXX..XXXXXXX 100644
73
--- a/tcg/i386/tcg-target.h
74
+++ b/tcg/i386/tcg-target.h
75
@@ -XXX,XX +XXX,XX @@ typedef enum {
76
TCG_REG_CALL_STACK = TCG_REG_ESP
77
} TCGReg;
78
79
-/* used for function call generation */
80
-#define TCG_TARGET_STACK_ALIGN 16
81
-#if defined(_WIN64)
82
-#define TCG_TARGET_CALL_STACK_OFFSET 32
83
-#else
84
-#define TCG_TARGET_CALL_STACK_OFFSET 0
85
-#endif
86
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
87
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
88
-#if defined(_WIN64)
89
-# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF
90
-# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_VEC
91
-#elif TCG_TARGET_REG_BITS == 64
92
-# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
93
-# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
94
-#else
95
-# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
96
-# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
97
-#endif
98
-
99
#define have_bmi1 (cpuinfo & CPUINFO_BMI1)
100
#define have_popcnt (cpuinfo & CPUINFO_POPCNT)
101
#define have_avx1 (cpuinfo & CPUINFO_AVX1)
102
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
103
index XXXXXXX..XXXXXXX 100644
104
--- a/tcg/loongarch64/tcg-target.h
105
+++ b/tcg/loongarch64/tcg-target.h
106
@@ -XXX,XX +XXX,XX @@ typedef enum {
107
TCG_VEC_TMP0 = TCG_REG_V23,
108
} TCGReg;
109
110
-/* used for function call generation */
111
-#define TCG_REG_CALL_STACK TCG_REG_SP
112
-#define TCG_TARGET_STACK_ALIGN 16
113
-#define TCG_TARGET_CALL_STACK_OFFSET 0
114
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
115
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
116
-#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
117
-#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
118
-
119
/* optional instructions */
120
#define TCG_TARGET_HAS_negsetcond_i32 0
121
#define TCG_TARGET_HAS_div_i32 1
122
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
123
index XXXXXXX..XXXXXXX 100644
124
--- a/tcg/mips/tcg-target.h
125
+++ b/tcg/mips/tcg-target.h
126
@@ -XXX,XX +XXX,XX @@ typedef enum {
127
TCG_AREG0 = TCG_REG_S8,
128
} TCGReg;
129
130
-/* used for function call generation */
131
-#define TCG_TARGET_STACK_ALIGN 16
132
-#if _MIPS_SIM == _ABIO32
133
-# define TCG_TARGET_CALL_STACK_OFFSET 16
134
-# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
135
-# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
136
-#else
137
-# define TCG_TARGET_CALL_STACK_OFFSET 0
138
-# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
139
-# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
140
-#endif
141
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
142
-#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
143
-
144
/* MOVN/MOVZ instructions detection */
145
#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 1)) || \
146
defined(_MIPS_ARCH_LOONGSON2E) || defined(_MIPS_ARCH_LOONGSON2F) || \
147
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
148
index XXXXXXX..XXXXXXX 100644
149
--- a/tcg/riscv/tcg-target.h
150
+++ b/tcg/riscv/tcg-target.h
151
@@ -XXX,XX +XXX,XX @@ typedef enum {
152
TCG_REG_TMP2 = TCG_REG_T4,
153
} TCGReg;
154
155
-/* used for function call generation */
156
-#define TCG_REG_CALL_STACK TCG_REG_SP
157
-#define TCG_TARGET_STACK_ALIGN 16
158
-#define TCG_TARGET_CALL_STACK_OFFSET 0
159
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
160
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
161
-#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
162
-#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
163
-
164
/* optional instructions */
165
#define TCG_TARGET_HAS_negsetcond_i32 1
166
#define TCG_TARGET_HAS_div_i32 1
167
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
168
index XXXXXXX..XXXXXXX 100644
169
--- a/tcg/s390x/tcg-target.h
170
+++ b/tcg/s390x/tcg-target.h
171
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
172
#define TCG_TARGET_HAS_cmpsel_vec 1
173
#define TCG_TARGET_HAS_tst_vec 0
174
175
-/* used for function call generation */
176
-#define TCG_TARGET_STACK_ALIGN        8
177
-#define TCG_TARGET_CALL_STACK_OFFSET    160
178
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND
179
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
180
-#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF
181
-#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
182
-
183
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
184
#define TCG_TARGET_NEED_LDST_LABELS
185
#define TCG_TARGET_NEED_POOL_LABELS
186
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
187
index XXXXXXX..XXXXXXX 100644
188
--- a/tcg/sparc64/tcg-target.h
189
+++ b/tcg/sparc64/tcg-target.h
190
@@ -XXX,XX +XXX,XX @@ typedef enum {
191
TCG_REG_I7,
192
} TCGReg;
193
194
-/* used for function call generation */
195
-#define TCG_REG_CALL_STACK TCG_REG_O6
196
-
197
-#define TCG_TARGET_STACK_BIAS 2047
198
-#define TCG_TARGET_STACK_ALIGN 16
199
-#define TCG_TARGET_CALL_STACK_OFFSET (128 + 6*8 + TCG_TARGET_STACK_BIAS)
200
-#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND
201
-#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
202
-#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
203
-#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
204
-
205
#if defined(__VIS__) && __VIS__ >= 0x300
206
#define use_vis3_instructions 1
207
#else
208
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
209
index XXXXXXX..XXXXXXX 100644
210
--- a/tcg/tci/tcg-target.h
211
+++ b/tcg/tci/tcg-target.h
212
@@ -XXX,XX +XXX,XX @@ typedef enum {
213
TCG_REG_CALL_STACK = TCG_REG_R15,
214
} TCGReg;
215
216
-/* Used for function call generation. */
217
-#define TCG_TARGET_CALL_STACK_OFFSET 0
218
-#define TCG_TARGET_STACK_ALIGN 8
219
-#if TCG_TARGET_REG_BITS == 32
220
-# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EVEN
221
-# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
222
-# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
223
-#else
224
-# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
225
-# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
226
-# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
227
-#endif
228
-#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
229
-
230
#define HAVE_TCG_QEMU_TB_EXEC
231
#define TCG_TARGET_NEED_POOL_LABELS
232
233
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
234
index XXXXXXX..XXXXXXX 100644
235
--- a/tcg/aarch64/tcg-target.c.inc
236
+++ b/tcg/aarch64/tcg-target.c.inc
237
@@ -XXX,XX +XXX,XX @@
238
#include "../tcg-pool.c.inc"
239
#include "qemu/bitops.h"
240
241
+/* Used for function call generation. */
242
+#define TCG_REG_CALL_STACK TCG_REG_SP
243
+#define TCG_TARGET_STACK_ALIGN 16
244
+#define TCG_TARGET_CALL_STACK_OFFSET 0
245
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
246
+#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
247
+#ifdef CONFIG_DARWIN
248
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
249
+#else
250
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
251
+#endif
252
+#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
253
+
254
/* We're going to re-use TCGType in setting of the SF bit, which controls
255
the size of the operation performed. If we know the values match, it
256
makes things much cleaner. */
257
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
258
index XXXXXXX..XXXXXXX 100644
259
--- a/tcg/arm/tcg-target.c.inc
260
+++ b/tcg/arm/tcg-target.c.inc
261
@@ -XXX,XX +XXX,XX @@ bool use_idiv_instructions;
262
bool use_neon_instructions;
263
#endif
264
265
+/* Used for function call generation. */
266
+#define TCG_TARGET_STACK_ALIGN 8
267
+#define TCG_TARGET_CALL_STACK_OFFSET 0
268
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
269
+#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
270
+#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
271
+#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
272
+
273
#ifdef CONFIG_DEBUG_TCG
274
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
275
"%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
276
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
277
index XXXXXXX..XXXXXXX 100644
278
--- a/tcg/i386/tcg-target.c.inc
279
+++ b/tcg/i386/tcg-target.c.inc
280
@@ -XXX,XX +XXX,XX @@
281
#include "../tcg-ldst.c.inc"
282
#include "../tcg-pool.c.inc"
283
284
+/* Used for function call generation. */
285
+#define TCG_TARGET_STACK_ALIGN 16
286
+#if defined(_WIN64)
287
+#define TCG_TARGET_CALL_STACK_OFFSET 32
288
+#else
289
+#define TCG_TARGET_CALL_STACK_OFFSET 0
290
+#endif
291
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
292
+#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
293
+#if defined(_WIN64)
294
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF
295
+# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_VEC
296
+#elif TCG_TARGET_REG_BITS == 64
297
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
298
+# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
299
+#else
300
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
301
+# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
302
+#endif
303
+
304
#ifdef CONFIG_DEBUG_TCG
305
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
306
#if TCG_TARGET_REG_BITS == 64
307
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
308
index XXXXXXX..XXXXXXX 100644
309
--- a/tcg/loongarch64/tcg-target.c.inc
310
+++ b/tcg/loongarch64/tcg-target.c.inc
311
@@ -XXX,XX +XXX,XX @@
312
#include "../tcg-ldst.c.inc"
313
#include <asm/hwcap.h>
314
315
+/* used for function call generation */
316
+#define TCG_REG_CALL_STACK TCG_REG_SP
317
+#define TCG_TARGET_STACK_ALIGN 16
318
+#define TCG_TARGET_CALL_STACK_OFFSET 0
319
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
320
+#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
321
+#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
322
+#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
323
+
324
#ifdef CONFIG_DEBUG_TCG
325
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
326
"zero",
327
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
328
index XXXXXXX..XXXXXXX 100644
329
--- a/tcg/mips/tcg-target.c.inc
330
+++ b/tcg/mips/tcg-target.c.inc
331
@@ -XXX,XX +XXX,XX @@
332
#include "../tcg-ldst.c.inc"
333
#include "../tcg-pool.c.inc"
334
335
+/* used for function call generation */
336
+#define TCG_TARGET_STACK_ALIGN 16
337
+#if _MIPS_SIM == _ABIO32
338
+# define TCG_TARGET_CALL_STACK_OFFSET 16
339
+# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
340
+# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
341
+#else
342
+# define TCG_TARGET_CALL_STACK_OFFSET 0
343
+# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
344
+# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
345
+#endif
346
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
347
+#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
348
+
349
#if TCG_TARGET_REG_BITS == 32
350
# define LO_OFF (HOST_BIG_ENDIAN * 4)
351
# define HI_OFF (4 - LO_OFF)
352
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
353
index XXXXXXX..XXXXXXX 100644
354
--- a/tcg/riscv/tcg-target.c.inc
355
+++ b/tcg/riscv/tcg-target.c.inc
356
@@ -XXX,XX +XXX,XX @@
357
#include "../tcg-ldst.c.inc"
358
#include "../tcg-pool.c.inc"
359
360
+/* Used for function call generation. */
361
+#define TCG_REG_CALL_STACK TCG_REG_SP
362
+#define TCG_TARGET_STACK_ALIGN 16
363
+#define TCG_TARGET_CALL_STACK_OFFSET 0
364
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
365
+#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
366
+#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
367
+#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
368
+
369
#ifdef CONFIG_DEBUG_TCG
370
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
371
"zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
372
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
373
index XXXXXXX..XXXXXXX 100644
374
--- a/tcg/s390x/tcg-target.c.inc
375
+++ b/tcg/s390x/tcg-target.c.inc
376
@@ -XXX,XX +XXX,XX @@
377
#include "../tcg-pool.c.inc"
378
#include "elf.h"
379
380
+/* Used for function call generation. */
381
+#define TCG_TARGET_STACK_ALIGN 8
382
+#define TCG_TARGET_CALL_STACK_OFFSET 160
383
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND
384
+#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
385
+#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF
386
+#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
387
+
388
#define TCG_CT_CONST_S16 (1 << 8)
389
#define TCG_CT_CONST_S32 (1 << 9)
390
#define TCG_CT_CONST_U32 (1 << 10)
391
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
392
index XXXXXXX..XXXXXXX 100644
393
--- a/tcg/sparc64/tcg-target.c.inc
394
+++ b/tcg/sparc64/tcg-target.c.inc
395
@@ -XXX,XX +XXX,XX @@
396
#include "../tcg-ldst.c.inc"
397
#include "../tcg-pool.c.inc"
398
399
+/* Used for function call generation. */
400
+#define TCG_REG_CALL_STACK TCG_REG_O6
401
+#define TCG_TARGET_STACK_BIAS 2047
402
+#define TCG_TARGET_STACK_ALIGN 16
403
+#define TCG_TARGET_CALL_STACK_OFFSET (128 + 6 * 8 + TCG_TARGET_STACK_BIAS)
404
+#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND
405
+#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
406
+#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
407
+#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
408
+
409
#ifdef CONFIG_DEBUG_TCG
410
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
411
"%g0",
412
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
413
index XXXXXXX..XXXXXXX 100644
414
--- a/tcg/tci/tcg-target.c.inc
415
+++ b/tcg/tci/tcg-target.c.inc
416
@@ -XXX,XX +XXX,XX @@
417
418
#include "../tcg-pool.c.inc"
419
420
+/* Used for function call generation. */
421
+#define TCG_TARGET_CALL_STACK_OFFSET 0
422
+#define TCG_TARGET_STACK_ALIGN 8
423
+#if TCG_TARGET_REG_BITS == 32
424
+# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EVEN
425
+# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
426
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_EVEN
427
+#else
428
+# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
429
+# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
430
+# define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
431
+#endif
432
+#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
433
+
434
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
435
{
436
switch (op) {
437
--
438
2.43.0
439
440
diff view generated by jsdifflib
1
The availability of tb->pc will shortly be conditional.
1
In the replacement, drop the TCGType - TCG_TYPE_V64 adjustment,
2
Introduce accessor functions to minimize ifdefs.
2
except for the call to tcg_out_vec_op. Pass type to tcg_gen_op[1-6],
3
so that all integer opcodes gain the type.
3
4
4
Pass around a known pc to places like tcg_gen_code,
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
where the caller must already have the value.
6
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
7
---
10
accel/tcg/internal.h | 6 ++++
8
include/tcg/tcg.h | 2 +-
11
include/exec/exec-all.h | 6 ++++
9
tcg/tcg-internal.h | 13 ++---
12
include/tcg/tcg.h | 2 +-
10
tcg/optimize.c | 10 +---
13
accel/tcg/cpu-exec.c | 46 ++++++++++++++-----------
11
tcg/tcg-op-ldst.c | 26 ++++++----
14
accel/tcg/translate-all.c | 37 +++++++++++---------
12
tcg/tcg-op-vec.c | 8 +--
15
target/arm/cpu.c | 4 +--
13
tcg/tcg-op.c | 113 +++++++++++++++++++++++------------------
16
target/avr/cpu.c | 2 +-
14
tcg/tcg.c | 11 ++--
17
target/hexagon/cpu.c | 2 +-
15
docs/devel/tcg-ops.rst | 15 +++---
18
target/hppa/cpu.c | 4 +--
16
8 files changed, 105 insertions(+), 93 deletions(-)
19
target/i386/tcg/tcg-cpu.c | 2 +-
20
target/loongarch/cpu.c | 2 +-
21
target/microblaze/cpu.c | 2 +-
22
target/mips/tcg/exception.c | 2 +-
23
target/mips/tcg/sysemu/special_helper.c | 2 +-
24
target/openrisc/cpu.c | 2 +-
25
target/riscv/cpu.c | 4 +--
26
target/rx/cpu.c | 2 +-
27
target/sh4/cpu.c | 4 +--
28
target/sparc/cpu.c | 2 +-
29
target/tricore/cpu.c | 2 +-
30
tcg/tcg.c | 8 ++---
31
21 files changed, 82 insertions(+), 61 deletions(-)
32
17
33
diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
34
index XXXXXXX..XXXXXXX 100644
35
--- a/accel/tcg/internal.h
36
+++ b/accel/tcg/internal.h
37
@@ -XXX,XX +XXX,XX @@ G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
38
void page_init(void);
39
void tb_htable_init(void);
40
41
+/* Return the current PC from CPU, which may be cached in TB. */
42
+static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
43
+{
44
+ return tb_pc(tb);
45
+}
46
+
47
#endif /* ACCEL_TCG_INTERNAL_H */
48
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
49
index XXXXXXX..XXXXXXX 100644
50
--- a/include/exec/exec-all.h
51
+++ b/include/exec/exec-all.h
52
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
53
uintptr_t jmp_dest[2];
54
};
55
56
+/* Hide the read to avoid ifdefs for TARGET_TB_PCREL. */
57
+static inline target_ulong tb_pc(const TranslationBlock *tb)
58
+{
59
+ return tb->pc;
60
+}
61
+
62
/* Hide the qatomic_read to make code a little easier on the eyes */
63
static inline uint32_t tb_cflags(const TranslationBlock *tb)
64
{
65
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
18
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
66
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
67
--- a/include/tcg/tcg.h
20
--- a/include/tcg/tcg.h
68
+++ b/include/tcg/tcg.h
21
+++ b/include/tcg/tcg.h
69
@@ -XXX,XX +XXX,XX @@ void tcg_register_thread(void);
22
@@ -XXX,XX +XXX,XX @@ struct TCGOp {
70
void tcg_prologue_init(TCGContext *s);
23
#define TCGOP_CALLI(X) (X)->param1
71
void tcg_func_start(TCGContext *s);
24
#define TCGOP_CALLO(X) (X)->param2
72
25
73
-int tcg_gen_code(TCGContext *s, TranslationBlock *tb);
26
-#define TCGOP_VECL(X) (X)->param1
74
+int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start);
27
+#define TCGOP_TYPE(X) (X)->param1
75
28
#define TCGOP_VECE(X) (X)->param2
76
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
29
77
30
/* Make sure operands fit in the bitfields above. */
78
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
31
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
79
index XXXXXXX..XXXXXXX 100644
32
index XXXXXXX..XXXXXXX 100644
80
--- a/accel/tcg/cpu-exec.c
33
--- a/tcg/tcg-internal.h
81
+++ b/accel/tcg/cpu-exec.c
34
+++ b/tcg/tcg-internal.h
82
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
35
@@ -XXX,XX +XXX,XX @@ TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind);
83
const TranslationBlock *tb = p;
36
*/
84
const struct tb_desc *desc = d;
37
TCGTemp *tcg_constant_internal(TCGType type, int64_t val);
85
38
86
- if (tb->pc == desc->pc &&
39
-TCGOp *tcg_gen_op1(TCGOpcode, TCGArg);
87
+ if (tb_pc(tb) == desc->pc &&
40
-TCGOp *tcg_gen_op2(TCGOpcode, TCGArg, TCGArg);
88
tb->page_addr[0] == desc->page_addr0 &&
41
-TCGOp *tcg_gen_op3(TCGOpcode, TCGArg, TCGArg, TCGArg);
89
tb->cs_base == desc->cs_base &&
42
-TCGOp *tcg_gen_op4(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg);
90
tb->flags == desc->flags &&
43
-TCGOp *tcg_gen_op5(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg);
91
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
44
-TCGOp *tcg_gen_op6(TCGOpcode, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg);
92
return tb;
45
+TCGOp *tcg_gen_op1(TCGOpcode, TCGType, TCGArg);
93
}
46
+TCGOp *tcg_gen_op2(TCGOpcode, TCGType, TCGArg, TCGArg);
94
47
+TCGOp *tcg_gen_op3(TCGOpcode, TCGType, TCGArg, TCGArg, TCGArg);
95
-static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
48
+TCGOp *tcg_gen_op4(TCGOpcode, TCGType, TCGArg, TCGArg, TCGArg, TCGArg);
96
- const TranslationBlock *tb)
49
+TCGOp *tcg_gen_op5(TCGOpcode, TCGType, TCGArg, TCGArg, TCGArg, TCGArg, TCGArg);
97
+static void log_cpu_exec(target_ulong pc, CPUState *cpu,
50
+TCGOp *tcg_gen_op6(TCGOpcode, TCGType, TCGArg, TCGArg,
98
+ const TranslationBlock *tb)
51
+ TCGArg, TCGArg, TCGArg, TCGArg);
99
{
52
100
- if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC))
53
void vec_gen_2(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg);
101
- && qemu_log_in_addr_range(pc)) {
54
void vec_gen_3(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg);
102
-
55
diff --git a/tcg/optimize.c b/tcg/optimize.c
103
+ if (qemu_log_in_addr_range(pc)) {
56
index XXXXXXX..XXXXXXX 100644
104
qemu_log_mask(CPU_LOG_EXEC,
57
--- a/tcg/optimize.c
105
"Trace %d: %p [" TARGET_FMT_lx
58
+++ b/tcg/optimize.c
106
"/" TARGET_FMT_lx "/%08x/%08x] %s\n",
59
@@ -XXX,XX +XXX,XX @@ static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
107
@@ -XXX,XX +XXX,XX @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
60
case TCG_TYPE_V64:
108
return tcg_code_gen_epilogue;
61
case TCG_TYPE_V128:
109
}
62
case TCG_TYPE_V256:
110
63
- /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
111
- log_cpu_exec(pc, cpu, tb);
64
+ /* TCGOP_TYPE and TCGOP_VECE remain unchanged. */
112
+ if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
65
new_op = INDEX_op_mov_vec;
113
+ log_cpu_exec(pc, cpu, tb);
66
break;
114
+ }
67
default:
115
68
@@ -XXX,XX +XXX,XX @@ void tcg_optimize(TCGContext *s)
116
return tb->tc.ptr;
69
copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
117
}
70
118
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
71
/* Pre-compute the type of the operation. */
119
TranslationBlock *last_tb;
72
- if (def->flags & TCG_OPF_VECTOR) {
120
const void *tb_ptr = itb->tc.ptr;
73
- ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
121
74
- } else if (def->flags & TCG_OPF_64BIT) {
122
- log_cpu_exec(itb->pc, cpu, itb);
75
- ctx.type = TCG_TYPE_I64;
123
+ if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
76
- } else {
124
+ log_cpu_exec(log_pc(cpu, itb), cpu, itb);
77
- ctx.type = TCG_TYPE_I32;
125
+ }
78
- }
126
79
+ ctx.type = TCGOP_TYPE(op);
127
qemu_thread_jit_execute();
80
128
ret = tcg_qemu_tb_exec(env, tb_ptr);
81
/*
129
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
82
* Process each opcode.
130
* of the start of the TB.
83
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
131
*/
84
index XXXXXXX..XXXXXXX 100644
132
CPUClass *cc = CPU_GET_CLASS(cpu);
85
--- a/tcg/tcg-op-ldst.c
133
- qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
86
+++ b/tcg/tcg-op-ldst.c
134
- "Stopped execution of TB chain before %p ["
87
@@ -XXX,XX +XXX,XX @@ static MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st)
135
- TARGET_FMT_lx "] %s\n",
88
return op;
136
- last_tb->tc.ptr, last_tb->pc,
89
}
137
- lookup_symbol(last_tb->pc));
90
138
+
91
-static void gen_ldst(TCGOpcode opc, TCGTemp *vl, TCGTemp *vh,
139
if (cc->tcg_ops->synchronize_from_tb) {
92
+static void gen_ldst(TCGOpcode opc, TCGType type, TCGTemp *vl, TCGTemp *vh,
140
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
93
TCGTemp *addr, MemOpIdx oi)
94
{
95
if (TCG_TARGET_REG_BITS == 64 || tcg_ctx->addr_type == TCG_TYPE_I32) {
96
if (vh) {
97
- tcg_gen_op4(opc, temp_arg(vl), temp_arg(vh), temp_arg(addr), oi);
98
+ tcg_gen_op4(opc, type, temp_arg(vl), temp_arg(vh),
99
+ temp_arg(addr), oi);
141
} else {
100
} else {
142
assert(cc->set_pc);
101
- tcg_gen_op3(opc, temp_arg(vl), temp_arg(addr), oi);
143
- cc->set_pc(cpu, last_tb->pc);
102
+ tcg_gen_op3(opc, type, temp_arg(vl), temp_arg(addr), oi);
144
+ cc->set_pc(cpu, tb_pc(last_tb));
103
}
145
+ }
104
} else {
146
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
105
/* See TCGV_LOW/HIGH. */
147
+ target_ulong pc = log_pc(cpu, last_tb);
106
@@ -XXX,XX +XXX,XX @@ static void gen_ldst(TCGOpcode opc, TCGTemp *vl, TCGTemp *vh,
148
+ if (qemu_log_in_addr_range(pc)) {
107
TCGTemp *ah = addr + !HOST_BIG_ENDIAN;
149
+ qemu_log("Stopped execution of TB chain before %p ["
108
150
+ TARGET_FMT_lx "] %s\n",
109
if (vh) {
151
+ last_tb->tc.ptr, pc, lookup_symbol(pc));
110
- tcg_gen_op5(opc, temp_arg(vl), temp_arg(vh),
152
+ }
111
+ tcg_gen_op5(opc, type, temp_arg(vl), temp_arg(vh),
112
temp_arg(al), temp_arg(ah), oi);
113
} else {
114
- tcg_gen_op4(opc, temp_arg(vl), temp_arg(al), temp_arg(ah), oi);
115
+ tcg_gen_op4(opc, type, temp_arg(vl),
116
+ temp_arg(al), temp_arg(ah), oi);
153
}
117
}
154
}
118
}
155
119
}
156
@@ -XXX,XX +XXX,XX @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
120
@@ -XXX,XX +XXX,XX @@ static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 v, TCGTemp *addr, MemOpIdx oi)
157
121
if (TCG_TARGET_REG_BITS == 32) {
158
qemu_spin_unlock(&tb_next->jmp_lock);
122
TCGTemp *vl = tcgv_i32_temp(TCGV_LOW(v));
159
123
TCGTemp *vh = tcgv_i32_temp(TCGV_HIGH(v));
160
- qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
124
- gen_ldst(opc, vl, vh, addr, oi);
161
- "Linking TBs %p [" TARGET_FMT_lx
125
+ gen_ldst(opc, TCG_TYPE_I64, vl, vh, addr, oi);
162
- "] index %d -> %p [" TARGET_FMT_lx "]\n",
126
} else {
163
- tb->tc.ptr, tb->pc, n,
127
- gen_ldst(opc, tcgv_i64_temp(v), NULL, addr, oi);
164
- tb_next->tc.ptr, tb_next->pc);
128
+ gen_ldst(opc, TCG_TYPE_I64, tcgv_i64_temp(v), NULL, addr, oi);
165
+ qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
129
}
166
+ tb->tc.ptr, n, tb_next->tc.ptr);
130
}
167
return;
131
168
132
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
169
out_unlock_next:
133
} else {
170
@@ -XXX,XX +XXX,XX @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
134
opc = INDEX_op_qemu_ld_a64_i32;
171
}
135
}
172
136
- gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
173
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
137
+ gen_ldst(opc, TCG_TYPE_I32, tcgv_i32_temp(val), NULL, addr, oi);
174
+ target_ulong pc,
138
plugin_gen_mem_callbacks_i32(val, copy_addr, addr, orig_oi,
175
TranslationBlock **last_tb, int *tb_exit)
139
QEMU_PLUGIN_MEM_R);
176
{
140
177
int32_t insns_left;
141
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
178
142
opc = INDEX_op_qemu_st_a64_i32;
179
- trace_exec_tb(tb, tb->pc);
143
}
180
+ trace_exec_tb(tb, pc);
144
}
181
tb = cpu_tb_exec(cpu, tb, tb_exit);
145
- gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
182
if (*tb_exit != TB_EXIT_REQUESTED) {
146
+ gen_ldst(opc, TCG_TYPE_I32, tcgv_i32_temp(val), NULL, addr, oi);
183
*last_tb = tb;
147
plugin_gen_mem_callbacks_i32(val, NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
184
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
148
185
tb_add_jump(last_tb, tb_exit, tb);
149
if (swap) {
186
}
150
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
187
151
} else {
188
- cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
152
opc = INDEX_op_qemu_ld_a64_i128;
189
+ cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
153
}
190
154
- gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);
191
/* Try to align the host and virtual clocks
155
+ gen_ldst(opc, TCG_TYPE_I128, tcgv_i64_temp(lo),
192
if the guest is in advance */
156
+ tcgv_i64_temp(hi), addr, oi);
193
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
157
158
if (need_bswap) {
159
tcg_gen_bswap64_i64(lo, lo);
160
@@ -XXX,XX +XXX,XX @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
161
} else {
162
opc = INDEX_op_qemu_st_a64_i128;
163
}
164
- gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);
165
+ gen_ldst(opc, TCG_TYPE_I128, tcgv_i64_temp(lo),
166
+ tcgv_i64_temp(hi), addr, oi);
167
168
if (need_bswap) {
169
tcg_temp_free_i64(lo);
170
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
194
index XXXXXXX..XXXXXXX 100644
171
index XXXXXXX..XXXXXXX 100644
195
--- a/accel/tcg/translate-all.c
172
--- a/tcg/tcg-op-vec.c
196
+++ b/accel/tcg/translate-all.c
173
+++ b/tcg/tcg-op-vec.c
197
@@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
174
@@ -XXX,XX +XXX,XX @@ bool tcg_can_emit_vecop_list(const TCGOpcode *list,
198
175
void vec_gen_2(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, TCGArg a)
199
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
176
{
200
if (i == 0) {
177
TCGOp *op = tcg_emit_op(opc, 2);
201
- prev = (j == 0 ? tb->pc : 0);
178
- TCGOP_VECL(op) = type - TCG_TYPE_V64;
202
+ prev = (j == 0 ? tb_pc(tb) : 0);
179
+ TCGOP_TYPE(op) = type;
203
} else {
180
TCGOP_VECE(op) = vece;
204
prev = tcg_ctx->gen_insn_data[i - 1][j];
181
op->args[0] = r;
205
}
182
op->args[1] = a;
206
@@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
183
@@ -XXX,XX +XXX,XX @@ void vec_gen_3(TCGOpcode opc, TCGType type, unsigned vece,
207
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
184
TCGArg r, TCGArg a, TCGArg b)
208
uintptr_t searched_pc, bool reset_icount)
185
{
209
{
186
TCGOp *op = tcg_emit_op(opc, 3);
210
- target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
187
- TCGOP_VECL(op) = type - TCG_TYPE_V64;
211
+ target_ulong data[TARGET_INSN_START_WORDS] = { tb_pc(tb) };
188
+ TCGOP_TYPE(op) = type;
212
uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
189
TCGOP_VECE(op) = vece;
213
CPUArchState *env = cpu->env_ptr;
190
op->args[0] = r;
214
const uint8_t *p = tb->tc.ptr + tb->tc.size;
191
op->args[1] = a;
215
@@ -XXX,XX +XXX,XX @@ static bool tb_cmp(const void *ap, const void *bp)
192
@@ -XXX,XX +XXX,XX @@ void vec_gen_4(TCGOpcode opc, TCGType type, unsigned vece,
216
const TranslationBlock *a = ap;
193
TCGArg r, TCGArg a, TCGArg b, TCGArg c)
217
const TranslationBlock *b = bp;
194
{
218
195
TCGOp *op = tcg_emit_op(opc, 4);
219
- return a->pc == b->pc &&
196
- TCGOP_VECL(op) = type - TCG_TYPE_V64;
220
+ return tb_pc(a) == tb_pc(b) &&
197
+ TCGOP_TYPE(op) = type;
221
a->cs_base == b->cs_base &&
198
TCGOP_VECE(op) = vece;
222
a->flags == b->flags &&
199
op->args[0] = r;
223
(tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
200
op->args[1] = a;
224
@@ -XXX,XX +XXX,XX @@ static void do_tb_invalidate_check(void *p, uint32_t hash, void *userp)
201
@@ -XXX,XX +XXX,XX @@ void vec_gen_6(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r,
225
TranslationBlock *tb = p;
202
TCGArg a, TCGArg b, TCGArg c, TCGArg d, TCGArg e)
226
target_ulong addr = *(target_ulong *)userp;
203
{
227
204
TCGOp *op = tcg_emit_op(opc, 6);
228
- if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
205
- TCGOP_VECL(op) = type - TCG_TYPE_V64;
229
+ if (!(addr + TARGET_PAGE_SIZE <= tb_pc(tb) ||
206
+ TCGOP_TYPE(op) = type;
230
+ addr >= tb_pc(tb) + tb->size)) {
207
TCGOP_VECE(op) = vece;
231
printf("ERROR invalidate: address=" TARGET_FMT_lx
208
op->args[0] = r;
232
- " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
209
op->args[1] = a;
233
+ " PC=%08lx size=%04x\n", addr, (long)tb_pc(tb), tb->size);
210
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
211
index XXXXXXX..XXXXXXX 100644
212
--- a/tcg/tcg-op.c
213
+++ b/tcg/tcg-op.c
214
@@ -XXX,XX +XXX,XX @@
215
*/
216
#define NI __attribute__((noinline))
217
218
-TCGOp * NI tcg_gen_op1(TCGOpcode opc, TCGArg a1)
219
+TCGOp * NI tcg_gen_op1(TCGOpcode opc, TCGType type, TCGArg a1)
220
{
221
TCGOp *op = tcg_emit_op(opc, 1);
222
+ TCGOP_TYPE(op) = type;
223
op->args[0] = a1;
224
return op;
225
}
226
227
-TCGOp * NI tcg_gen_op2(TCGOpcode opc, TCGArg a1, TCGArg a2)
228
+TCGOp * NI tcg_gen_op2(TCGOpcode opc, TCGType type, TCGArg a1, TCGArg a2)
229
{
230
TCGOp *op = tcg_emit_op(opc, 2);
231
+ TCGOP_TYPE(op) = type;
232
op->args[0] = a1;
233
op->args[1] = a2;
234
return op;
235
}
236
237
-TCGOp * NI tcg_gen_op3(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3)
238
+TCGOp * NI tcg_gen_op3(TCGOpcode opc, TCGType type, TCGArg a1,
239
+ TCGArg a2, TCGArg a3)
240
{
241
TCGOp *op = tcg_emit_op(opc, 3);
242
+ TCGOP_TYPE(op) = type;
243
op->args[0] = a1;
244
op->args[1] = a2;
245
op->args[2] = a3;
246
return op;
247
}
248
249
-TCGOp * NI tcg_gen_op4(TCGOpcode opc, TCGArg a1, TCGArg a2,
250
+TCGOp * NI tcg_gen_op4(TCGOpcode opc, TCGType type, TCGArg a1, TCGArg a2,
251
TCGArg a3, TCGArg a4)
252
{
253
TCGOp *op = tcg_emit_op(opc, 4);
254
+ TCGOP_TYPE(op) = type;
255
op->args[0] = a1;
256
op->args[1] = a2;
257
op->args[2] = a3;
258
@@ -XXX,XX +XXX,XX @@ TCGOp * NI tcg_gen_op4(TCGOpcode opc, TCGArg a1, TCGArg a2,
259
return op;
260
}
261
262
-TCGOp * NI tcg_gen_op5(TCGOpcode opc, TCGArg a1, TCGArg a2,
263
+TCGOp * NI tcg_gen_op5(TCGOpcode opc, TCGType type, TCGArg a1, TCGArg a2,
264
TCGArg a3, TCGArg a4, TCGArg a5)
265
{
266
TCGOp *op = tcg_emit_op(opc, 5);
267
+ TCGOP_TYPE(op) = type;
268
op->args[0] = a1;
269
op->args[1] = a2;
270
op->args[2] = a3;
271
@@ -XXX,XX +XXX,XX @@ TCGOp * NI tcg_gen_op5(TCGOpcode opc, TCGArg a1, TCGArg a2,
272
return op;
273
}
274
275
-TCGOp * NI tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
276
- TCGArg a4, TCGArg a5, TCGArg a6)
277
+TCGOp * NI tcg_gen_op6(TCGOpcode opc, TCGType type, TCGArg a1, TCGArg a2,
278
+ TCGArg a3, TCGArg a4, TCGArg a5, TCGArg a6)
279
{
280
TCGOp *op = tcg_emit_op(opc, 6);
281
+ TCGOP_TYPE(op) = type;
282
op->args[0] = a1;
283
op->args[1] = a2;
284
op->args[2] = a3;
285
@@ -XXX,XX +XXX,XX @@ TCGOp * NI tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,
286
# define DNI
287
#endif
288
289
-static void DNI tcg_gen_op1_i32(TCGOpcode opc, TCGv_i32 a1)
290
+static void DNI tcg_gen_op1_i32(TCGOpcode opc, TCGType type, TCGv_i32 a1)
291
{
292
- tcg_gen_op1(opc, tcgv_i32_arg(a1));
293
+ tcg_gen_op1(opc, type, tcgv_i32_arg(a1));
294
}
295
296
-static void DNI tcg_gen_op1_i64(TCGOpcode opc, TCGv_i64 a1)
297
+static void DNI tcg_gen_op1_i64(TCGOpcode opc, TCGType type, TCGv_i64 a1)
298
{
299
- tcg_gen_op1(opc, tcgv_i64_arg(a1));
300
+ tcg_gen_op1(opc, type, tcgv_i64_arg(a1));
301
}
302
303
-static TCGOp * DNI tcg_gen_op1i(TCGOpcode opc, TCGArg a1)
304
+static TCGOp * DNI tcg_gen_op1i(TCGOpcode opc, TCGType type, TCGArg a1)
305
{
306
- return tcg_gen_op1(opc, a1);
307
+ return tcg_gen_op1(opc, type, a1);
308
}
309
310
static void DNI tcg_gen_op2_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2)
311
{
312
- tcg_gen_op2(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2));
313
+ tcg_gen_op2(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2));
314
}
315
316
static void DNI tcg_gen_op2_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2)
317
{
318
- tcg_gen_op2(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2));
319
+ tcg_gen_op2(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2));
320
}
321
322
static void DNI tcg_gen_op3_i32(TCGOpcode opc, TCGv_i32 a1,
323
TCGv_i32 a2, TCGv_i32 a3)
324
{
325
- tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), tcgv_i32_arg(a3));
326
+ tcg_gen_op3(opc, TCG_TYPE_I32, tcgv_i32_arg(a1),
327
+ tcgv_i32_arg(a2), tcgv_i32_arg(a3));
328
}
329
330
static void DNI tcg_gen_op3_i64(TCGOpcode opc, TCGv_i64 a1,
331
TCGv_i64 a2, TCGv_i64 a3)
332
{
333
- tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), tcgv_i64_arg(a3));
334
+ tcg_gen_op3(opc, TCG_TYPE_I64, tcgv_i64_arg(a1),
335
+ tcgv_i64_arg(a2), tcgv_i64_arg(a3));
336
}
337
338
static void DNI tcg_gen_op3i_i32(TCGOpcode opc, TCGv_i32 a1,
339
TCGv_i32 a2, TCGArg a3)
340
{
341
- tcg_gen_op3(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3);
342
+ tcg_gen_op3(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3);
343
}
344
345
static void DNI tcg_gen_op3i_i64(TCGOpcode opc, TCGv_i64 a1,
346
TCGv_i64 a2, TCGArg a3)
347
{
348
- tcg_gen_op3(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3);
349
+ tcg_gen_op3(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3);
350
}
351
352
static void DNI tcg_gen_ldst_op_i32(TCGOpcode opc, TCGv_i32 val,
353
TCGv_ptr base, TCGArg offset)
354
{
355
- tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_ptr_arg(base), offset);
356
+ tcg_gen_op3(opc, TCG_TYPE_I32, tcgv_i32_arg(val),
357
+ tcgv_ptr_arg(base), offset);
358
}
359
360
static void DNI tcg_gen_ldst_op_i64(TCGOpcode opc, TCGv_i64 val,
361
TCGv_ptr base, TCGArg offset)
362
{
363
- tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_ptr_arg(base), offset);
364
+ tcg_gen_op3(opc, TCG_TYPE_I64, tcgv_i64_arg(val),
365
+ tcgv_ptr_arg(base), offset);
366
}
367
368
static void DNI tcg_gen_op4_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
369
TCGv_i32 a3, TCGv_i32 a4)
370
{
371
- tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
372
+ tcg_gen_op4(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
373
tcgv_i32_arg(a3), tcgv_i32_arg(a4));
374
}
375
376
static void DNI tcg_gen_op4_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
377
TCGv_i64 a3, TCGv_i64 a4)
378
{
379
- tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
380
+ tcg_gen_op4(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
381
tcgv_i64_arg(a3), tcgv_i64_arg(a4));
382
}
383
384
static void DNI tcg_gen_op4i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
385
TCGv_i32 a3, TCGArg a4)
386
{
387
- tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
388
+ tcg_gen_op4(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
389
tcgv_i32_arg(a3), a4);
390
}
391
392
static void DNI tcg_gen_op4i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
393
TCGv_i64 a3, TCGArg a4)
394
{
395
- tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
396
+ tcg_gen_op4(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
397
tcgv_i64_arg(a3), a4);
398
}
399
400
static TCGOp * DNI tcg_gen_op4ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
401
TCGArg a3, TCGArg a4)
402
{
403
- return tcg_gen_op4(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3, a4);
404
+ return tcg_gen_op4(opc, TCG_TYPE_I32,
405
+ tcgv_i32_arg(a1), tcgv_i32_arg(a2), a3, a4);
406
}
407
408
static TCGOp * DNI tcg_gen_op4ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
409
TCGArg a3, TCGArg a4)
410
{
411
- return tcg_gen_op4(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3, a4);
412
+ return tcg_gen_op4(opc, TCG_TYPE_I64,
413
+ tcgv_i64_arg(a1), tcgv_i64_arg(a2), a3, a4);
414
}
415
416
static void DNI tcg_gen_op5_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
417
TCGv_i32 a3, TCGv_i32 a4, TCGv_i32 a5)
418
{
419
- tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
420
+ tcg_gen_op5(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
421
tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5));
422
}
423
424
static void DNI tcg_gen_op5_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
425
TCGv_i64 a3, TCGv_i64 a4, TCGv_i64 a5)
426
{
427
- tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
428
+ tcg_gen_op5(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
429
tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5));
430
}
431
432
static void DNI tcg_gen_op5ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
433
TCGv_i32 a3, TCGArg a4, TCGArg a5)
434
{
435
- tcg_gen_op5(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
436
+ tcg_gen_op5(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
437
tcgv_i32_arg(a3), a4, a5);
438
}
439
440
static void DNI tcg_gen_op5ii_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
441
TCGv_i64 a3, TCGArg a4, TCGArg a5)
442
{
443
- tcg_gen_op5(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
444
+ tcg_gen_op5(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
445
tcgv_i64_arg(a3), a4, a5);
446
}
447
448
@@ -XXX,XX +XXX,XX @@ static void DNI tcg_gen_op6_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
449
TCGv_i32 a3, TCGv_i32 a4,
450
TCGv_i32 a5, TCGv_i32 a6)
451
{
452
- tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
453
+ tcg_gen_op6(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
454
tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5),
455
tcgv_i32_arg(a6));
456
}
457
@@ -XXX,XX +XXX,XX @@ static void DNI tcg_gen_op6_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
458
TCGv_i64 a3, TCGv_i64 a4,
459
TCGv_i64 a5, TCGv_i64 a6)
460
{
461
- tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
462
+ tcg_gen_op6(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
463
tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5),
464
tcgv_i64_arg(a6));
465
}
466
@@ -XXX,XX +XXX,XX @@ static void DNI tcg_gen_op6i_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
467
TCGv_i32 a3, TCGv_i32 a4,
468
TCGv_i32 a5, TCGArg a6)
469
{
470
- tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
471
+ tcg_gen_op6(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
472
tcgv_i32_arg(a3), tcgv_i32_arg(a4), tcgv_i32_arg(a5), a6);
473
}
474
475
@@ -XXX,XX +XXX,XX @@ static void DNI tcg_gen_op6i_i64(TCGOpcode opc, TCGv_i64 a1, TCGv_i64 a2,
476
TCGv_i64 a3, TCGv_i64 a4,
477
TCGv_i64 a5, TCGArg a6)
478
{
479
- tcg_gen_op6(opc, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
480
+ tcg_gen_op6(opc, TCG_TYPE_I64, tcgv_i64_arg(a1), tcgv_i64_arg(a2),
481
tcgv_i64_arg(a3), tcgv_i64_arg(a4), tcgv_i64_arg(a5), a6);
482
}
483
484
@@ -XXX,XX +XXX,XX @@ static TCGOp * DNI tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
485
TCGv_i32 a3, TCGv_i32 a4,
486
TCGArg a5, TCGArg a6)
487
{
488
- return tcg_gen_op6(opc, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
489
+ return tcg_gen_op6(opc, TCG_TYPE_I32, tcgv_i32_arg(a1), tcgv_i32_arg(a2),
490
tcgv_i32_arg(a3), tcgv_i32_arg(a4), a5, a6);
491
}
492
493
@@ -XXX,XX +XXX,XX @@ static TCGOp * DNI tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 a1, TCGv_i32 a2,
494
void gen_set_label(TCGLabel *l)
495
{
496
l->present = 1;
497
- tcg_gen_op1(INDEX_op_set_label, label_arg(l));
498
+ tcg_gen_op1(INDEX_op_set_label, 0, label_arg(l));
499
}
500
501
static void add_as_label_use(TCGLabel *l, TCGOp *op)
502
@@ -XXX,XX +XXX,XX @@ static void add_as_label_use(TCGLabel *l, TCGOp *op)
503
504
void tcg_gen_br(TCGLabel *l)
505
{
506
- add_as_label_use(l, tcg_gen_op1(INDEX_op_br, label_arg(l)));
507
+ add_as_label_use(l, tcg_gen_op1(INDEX_op_br, 0, label_arg(l)));
508
}
509
510
void tcg_gen_mb(TCGBar mb_type)
511
@@ -XXX,XX +XXX,XX @@ void tcg_gen_mb(TCGBar mb_type)
512
#endif
513
514
if (parallel) {
515
- tcg_gen_op1(INDEX_op_mb, mb_type);
516
+ tcg_gen_op1(INDEX_op_mb, 0, mb_type);
234
}
517
}
235
}
518
}
236
519
237
@@ -XXX,XX +XXX,XX @@ static void do_tb_page_check(void *p, uint32_t hash, void *userp)
520
void tcg_gen_plugin_cb(unsigned from)
238
TranslationBlock *tb = p;
521
{
239
int flags1, flags2;
522
- tcg_gen_op1(INDEX_op_plugin_cb, from);
240
523
+ tcg_gen_op1(INDEX_op_plugin_cb, 0, from);
241
- flags1 = page_get_flags(tb->pc);
524
}
242
- flags2 = page_get_flags(tb->pc + tb->size - 1);
525
243
+ flags1 = page_get_flags(tb_pc(tb));
526
void tcg_gen_plugin_mem_cb(TCGv_i64 addr, unsigned meminfo)
244
+ flags2 = page_get_flags(tb_pc(tb) + tb->size - 1);
527
{
245
if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
528
- tcg_gen_op2(INDEX_op_plugin_mem_cb, tcgv_i64_arg(addr), meminfo);
246
printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
529
+ tcg_gen_op2(INDEX_op_plugin_mem_cb, 0, tcgv_i64_arg(addr), meminfo);
247
- (long)tb->pc, tb->size, flags1, flags2);
530
}
248
+ (long)tb_pc(tb), tb->size, flags1, flags2);
531
532
/* 32 bit ops */
533
534
void tcg_gen_discard_i32(TCGv_i32 arg)
535
{
536
- tcg_gen_op1_i32(INDEX_op_discard, arg);
537
+ tcg_gen_op1_i32(INDEX_op_discard, TCG_TYPE_I32, arg);
538
}
539
540
void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg)
541
@@ -XXX,XX +XXX,XX @@ void tcg_gen_st_i32(TCGv_i32 arg1, TCGv_ptr arg2, tcg_target_long offset)
542
void tcg_gen_discard_i64(TCGv_i64 arg)
543
{
544
if (TCG_TARGET_REG_BITS == 64) {
545
- tcg_gen_op1_i64(INDEX_op_discard, arg);
546
+ tcg_gen_op1_i64(INDEX_op_discard, TCG_TYPE_I64, arg);
547
} else {
548
tcg_gen_discard_i32(TCGV_LOW(arg));
549
tcg_gen_discard_i32(TCGV_HIGH(arg));
550
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
551
if (TCG_TARGET_REG_BITS == 32) {
552
tcg_gen_mov_i32(ret, TCGV_LOW(arg));
553
} else if (TCG_TARGET_HAS_extr_i64_i32) {
554
- tcg_gen_op2(INDEX_op_extrl_i64_i32,
555
+ tcg_gen_op2(INDEX_op_extrl_i64_i32, TCG_TYPE_I32,
556
tcgv_i32_arg(ret), tcgv_i64_arg(arg));
557
} else {
558
tcg_gen_mov_i32(ret, (TCGv_i32)arg);
559
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
560
if (TCG_TARGET_REG_BITS == 32) {
561
tcg_gen_mov_i32(ret, TCGV_HIGH(arg));
562
} else if (TCG_TARGET_HAS_extr_i64_i32) {
563
- tcg_gen_op2(INDEX_op_extrh_i64_i32,
564
+ tcg_gen_op2(INDEX_op_extrh_i64_i32, TCG_TYPE_I32,
565
tcgv_i32_arg(ret), tcgv_i64_arg(arg));
566
} else {
567
TCGv_i64 t = tcg_temp_ebb_new_i64();
568
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
569
tcg_gen_mov_i32(TCGV_LOW(ret), arg);
570
tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
571
} else {
572
- tcg_gen_op2(INDEX_op_extu_i32_i64,
573
+ tcg_gen_op2(INDEX_op_extu_i32_i64, TCG_TYPE_I64,
574
tcgv_i64_arg(ret), tcgv_i32_arg(arg));
249
}
575
}
250
}
576
}
251
577
@@ -XXX,XX +XXX,XX @@ void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg)
252
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
578
tcg_gen_mov_i32(TCGV_LOW(ret), arg);
253
579
tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31);
254
/* remove the TB from the hash list */
580
} else {
255
phys_pc = tb->page_addr[0];
581
- tcg_gen_op2(INDEX_op_ext_i32_i64,
256
- h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
582
+ tcg_gen_op2(INDEX_op_ext_i32_i64, TCG_TYPE_I64,
257
+ h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, orig_cflags,
583
tcgv_i64_arg(ret), tcgv_i32_arg(arg));
258
tb->trace_vcpu_dstate);
259
if (!qht_remove(&tb_ctx.htable, tb, h)) {
260
return;
261
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
262
}
584
}
263
585
}
264
/* add in the hash table */
586
@@ -XXX,XX +XXX,XX @@ void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx)
265
- h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags,
587
tcg_debug_assert(idx == TB_EXIT_REQUESTED);
266
+ h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, tb->cflags,
588
}
267
tb->trace_vcpu_dstate);
589
268
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
590
- tcg_gen_op1i(INDEX_op_exit_tb, val);
269
591
+ tcg_gen_op1i(INDEX_op_exit_tb, 0, val);
270
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
592
}
271
tcg_ctx->cpu = NULL;
593
272
max_insns = tb->icount;
594
void tcg_gen_goto_tb(unsigned idx)
273
595
@@ -XXX,XX +XXX,XX @@ void tcg_gen_goto_tb(unsigned idx)
274
- trace_translate_block(tb, tb->pc, tb->tc.ptr);
596
tcg_ctx->goto_tb_issue_mask |= 1 << idx;
275
+ trace_translate_block(tb, pc, tb->tc.ptr);
276
277
/* generate machine code */
278
tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
279
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
280
ti = profile_getclock();
281
#endif
597
#endif
282
598
plugin_gen_disable_mem_helpers();
283
- gen_code_size = tcg_gen_code(tcg_ctx, tb);
599
- tcg_gen_op1i(INDEX_op_goto_tb, idx);
284
+ gen_code_size = tcg_gen_code(tcg_ctx, tb, pc);
600
+ tcg_gen_op1i(INDEX_op_goto_tb, 0, idx);
285
if (unlikely(gen_code_size < 0)) {
601
}
286
error_return:
602
287
switch (gen_code_size) {
603
void tcg_gen_lookup_and_goto_ptr(void)
288
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
604
@@ -XXX,XX +XXX,XX @@ void tcg_gen_lookup_and_goto_ptr(void)
289
605
plugin_gen_disable_mem_helpers();
290
#ifdef DEBUG_DISAS
606
ptr = tcg_temp_ebb_new_ptr();
291
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
607
gen_helper_lookup_tb_ptr(ptr, tcg_env);
292
- qemu_log_in_addr_range(tb->pc)) {
608
- tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
293
+ qemu_log_in_addr_range(pc)) {
609
+ tcg_gen_op1i(INDEX_op_goto_ptr, TCG_TYPE_PTR, tcgv_ptr_arg(ptr));
294
FILE *logfile = qemu_log_trylock();
610
tcg_temp_free_ptr(ptr);
295
if (logfile) {
611
}
296
int code_size, data_size;
297
@@ -XXX,XX +XXX,XX @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
298
*/
299
cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
300
301
- qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
302
- "cpu_io_recompile: rewound execution of TB to "
303
- TARGET_FMT_lx "\n", tb->pc);
304
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
305
+ target_ulong pc = log_pc(cpu, tb);
306
+ if (qemu_log_in_addr_range(pc)) {
307
+ qemu_log("cpu_io_recompile: rewound execution of TB to "
308
+ TARGET_FMT_lx "\n", pc);
309
+ }
310
+ }
311
312
cpu_loop_exit_noexc(cpu);
313
}
314
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
315
index XXXXXXX..XXXXXXX 100644
316
--- a/target/arm/cpu.c
317
+++ b/target/arm/cpu.c
318
@@ -XXX,XX +XXX,XX @@ void arm_cpu_synchronize_from_tb(CPUState *cs,
319
* never possible for an AArch64 TB to chain to an AArch32 TB.
320
*/
321
if (is_a64(env)) {
322
- env->pc = tb->pc;
323
+ env->pc = tb_pc(tb);
324
} else {
325
- env->regs[15] = tb->pc;
326
+ env->regs[15] = tb_pc(tb);
327
}
328
}
329
#endif /* CONFIG_TCG */
330
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
331
index XXXXXXX..XXXXXXX 100644
332
--- a/target/avr/cpu.c
333
+++ b/target/avr/cpu.c
334
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_synchronize_from_tb(CPUState *cs,
335
AVRCPU *cpu = AVR_CPU(cs);
336
CPUAVRState *env = &cpu->env;
337
338
- env->pc_w = tb->pc / 2; /* internally PC points to words */
339
+ env->pc_w = tb_pc(tb) / 2; /* internally PC points to words */
340
}
341
342
static void avr_cpu_reset(DeviceState *ds)
343
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
344
index XXXXXXX..XXXXXXX 100644
345
--- a/target/hexagon/cpu.c
346
+++ b/target/hexagon/cpu.c
347
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_synchronize_from_tb(CPUState *cs,
348
{
349
HexagonCPU *cpu = HEXAGON_CPU(cs);
350
CPUHexagonState *env = &cpu->env;
351
- env->gpr[HEX_REG_PC] = tb->pc;
352
+ env->gpr[HEX_REG_PC] = tb_pc(tb);
353
}
354
355
static bool hexagon_cpu_has_work(CPUState *cs)
356
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
357
index XXXXXXX..XXXXXXX 100644
358
--- a/target/hppa/cpu.c
359
+++ b/target/hppa/cpu.c
360
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs,
361
HPPACPU *cpu = HPPA_CPU(cs);
362
363
#ifdef CONFIG_USER_ONLY
364
- cpu->env.iaoq_f = tb->pc;
365
+ cpu->env.iaoq_f = tb_pc(tb);
366
cpu->env.iaoq_b = tb->cs_base;
367
#else
368
/* Recover the IAOQ values from the GVA + PRIV. */
369
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_synchronize_from_tb(CPUState *cs,
370
int32_t diff = cs_base;
371
372
cpu->env.iasq_f = iasq_f;
373
- cpu->env.iaoq_f = (tb->pc & ~iasq_f) + priv;
374
+ cpu->env.iaoq_f = (tb_pc(tb) & ~iasq_f) + priv;
375
if (diff) {
376
cpu->env.iaoq_b = cpu->env.iaoq_f + diff;
377
}
378
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
379
index XXXXXXX..XXXXXXX 100644
380
--- a/target/i386/tcg/tcg-cpu.c
381
+++ b/target/i386/tcg/tcg-cpu.c
382
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_synchronize_from_tb(CPUState *cs,
383
{
384
X86CPU *cpu = X86_CPU(cs);
385
386
- cpu->env.eip = tb->pc - tb->cs_base;
387
+ cpu->env.eip = tb_pc(tb) - tb->cs_base;
388
}
389
390
#ifndef CONFIG_USER_ONLY
391
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
392
index XXXXXXX..XXXXXXX 100644
393
--- a/target/loongarch/cpu.c
394
+++ b/target/loongarch/cpu.c
395
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
396
LoongArchCPU *cpu = LOONGARCH_CPU(cs);
397
CPULoongArchState *env = &cpu->env;
398
399
- env->pc = tb->pc;
400
+ env->pc = tb_pc(tb);
401
}
402
#endif /* CONFIG_TCG */
403
404
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
405
index XXXXXXX..XXXXXXX 100644
406
--- a/target/microblaze/cpu.c
407
+++ b/target/microblaze/cpu.c
408
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_synchronize_from_tb(CPUState *cs,
409
{
410
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
411
412
- cpu->env.pc = tb->pc;
413
+ cpu->env.pc = tb_pc(tb);
414
cpu->env.iflags = tb->flags & IFLAGS_TB_MASK;
415
}
416
417
diff --git a/target/mips/tcg/exception.c b/target/mips/tcg/exception.c
418
index XXXXXXX..XXXXXXX 100644
419
--- a/target/mips/tcg/exception.c
420
+++ b/target/mips/tcg/exception.c
421
@@ -XXX,XX +XXX,XX @@ void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb)
422
MIPSCPU *cpu = MIPS_CPU(cs);
423
CPUMIPSState *env = &cpu->env;
424
425
- env->active_tc.PC = tb->pc;
426
+ env->active_tc.PC = tb_pc(tb);
427
env->hflags &= ~MIPS_HFLAG_BMASK;
428
env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
429
}
430
diff --git a/target/mips/tcg/sysemu/special_helper.c b/target/mips/tcg/sysemu/special_helper.c
431
index XXXXXXX..XXXXXXX 100644
432
--- a/target/mips/tcg/sysemu/special_helper.c
433
+++ b/target/mips/tcg/sysemu/special_helper.c
434
@@ -XXX,XX +XXX,XX @@ bool mips_io_recompile_replay_branch(CPUState *cs, const TranslationBlock *tb)
435
CPUMIPSState *env = &cpu->env;
436
437
if ((env->hflags & MIPS_HFLAG_BMASK) != 0
438
- && env->active_tc.PC != tb->pc) {
439
+ && env->active_tc.PC != tb_pc(tb)) {
440
env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
441
env->hflags &= ~MIPS_HFLAG_BMASK;
442
return true;
443
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
444
index XXXXXXX..XXXXXXX 100644
445
--- a/target/openrisc/cpu.c
446
+++ b/target/openrisc/cpu.c
447
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_synchronize_from_tb(CPUState *cs,
448
{
449
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
450
451
- cpu->env.pc = tb->pc;
452
+ cpu->env.pc = tb_pc(tb);
453
}
454
455
456
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
457
index XXXXXXX..XXXXXXX 100644
458
--- a/target/riscv/cpu.c
459
+++ b/target/riscv/cpu.c
460
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_synchronize_from_tb(CPUState *cs,
461
RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
462
463
if (xl == MXL_RV32) {
464
- env->pc = (int32_t)tb->pc;
465
+ env->pc = (int32_t)tb_pc(tb);
466
} else {
467
- env->pc = tb->pc;
468
+ env->pc = tb_pc(tb);
469
}
470
}
471
472
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
473
index XXXXXXX..XXXXXXX 100644
474
--- a/target/rx/cpu.c
475
+++ b/target/rx/cpu.c
476
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_synchronize_from_tb(CPUState *cs,
477
{
478
RXCPU *cpu = RX_CPU(cs);
479
480
- cpu->env.pc = tb->pc;
481
+ cpu->env.pc = tb_pc(tb);
482
}
483
484
static bool rx_cpu_has_work(CPUState *cs)
485
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
486
index XXXXXXX..XXXXXXX 100644
487
--- a/target/sh4/cpu.c
488
+++ b/target/sh4/cpu.c
489
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_synchronize_from_tb(CPUState *cs,
490
{
491
SuperHCPU *cpu = SUPERH_CPU(cs);
492
493
- cpu->env.pc = tb->pc;
494
+ cpu->env.pc = tb_pc(tb);
495
cpu->env.flags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
496
}
497
498
@@ -XXX,XX +XXX,XX @@ static bool superh_io_recompile_replay_branch(CPUState *cs,
499
CPUSH4State *env = &cpu->env;
500
501
if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
502
- && env->pc != tb->pc) {
503
+ && env->pc != tb_pc(tb)) {
504
env->pc -= 2;
505
env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
506
return true;
507
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
508
index XXXXXXX..XXXXXXX 100644
509
--- a/target/sparc/cpu.c
510
+++ b/target/sparc/cpu.c
511
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_synchronize_from_tb(CPUState *cs,
512
{
513
SPARCCPU *cpu = SPARC_CPU(cs);
514
515
- cpu->env.pc = tb->pc;
516
+ cpu->env.pc = tb_pc(tb);
517
cpu->env.npc = tb->cs_base;
518
}
519
520
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
521
index XXXXXXX..XXXXXXX 100644
522
--- a/target/tricore/cpu.c
523
+++ b/target/tricore/cpu.c
524
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_synchronize_from_tb(CPUState *cs,
525
TriCoreCPU *cpu = TRICORE_CPU(cs);
526
CPUTriCoreState *env = &cpu->env;
527
528
- env->PC = tb->pc;
529
+ env->PC = tb_pc(tb);
530
}
531
532
static void tricore_cpu_reset(DeviceState *dev)
533
diff --git a/tcg/tcg.c b/tcg/tcg.c
612
diff --git a/tcg/tcg.c b/tcg/tcg.c
534
index XXXXXXX..XXXXXXX 100644
613
index XXXXXXX..XXXXXXX 100644
535
--- a/tcg/tcg.c
614
--- a/tcg/tcg.c
536
+++ b/tcg/tcg.c
615
+++ b/tcg/tcg.c
537
@@ -XXX,XX +XXX,XX @@ int64_t tcg_cpu_exec_time(void)
616
@@ -XXX,XX +XXX,XX @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
538
#endif
617
nb_cargs = def->nb_cargs;
539
618
540
619
if (def->flags & TCG_OPF_VECTOR) {
541
-int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
620
- col += ne_fprintf(f, "v%d,e%d,", 64 << TCGOP_VECL(op),
542
+int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
621
+ col += ne_fprintf(f, "v%d,e%d,",
543
{
622
+ 8 * tcg_type_size(TCGOP_TYPE(op)),
544
#ifdef CONFIG_PROFILER
623
8 << TCGOP_VECE(op));
545
TCGProfile *prof = &s->prof;
624
}
546
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
625
547
626
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
548
#ifdef DEBUG_DISAS
627
549
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)
628
itype = its->type;
550
- && qemu_log_in_addr_range(tb->pc))) {
629
vece = TCGOP_VECE(op);
551
+ && qemu_log_in_addr_range(pc_start))) {
630
- vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
552
FILE *logfile = qemu_log_trylock();
631
+ vtype = TCGOP_TYPE(op);
553
if (logfile) {
632
554
fprintf(logfile, "OP:\n");
633
if (its->val_type == TEMP_VAL_CONST) {
555
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
634
/* Propagate constant via movi -> dupi. */
556
if (s->nb_indirects > 0) {
635
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
557
#ifdef DEBUG_DISAS
636
break;
558
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_IND)
637
default:
559
- && qemu_log_in_addr_range(tb->pc))) {
638
if (def->flags & TCG_OPF_VECTOR) {
560
+ && qemu_log_in_addr_range(pc_start))) {
639
- tcg_out_vec_op(s, op->opc, TCGOP_VECL(op), TCGOP_VECE(op),
561
FILE *logfile = qemu_log_trylock();
640
- new_args, const_args);
562
if (logfile) {
641
+ tcg_out_vec_op(s, op->opc, TCGOP_TYPE(op) - TCG_TYPE_V64,
563
fprintf(logfile, "OP before indirect lowering:\n");
642
+ TCGOP_VECE(op), new_args, const_args);
564
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
643
} else {
565
644
tcg_out_op(s, op->opc, new_args, const_args);
566
#ifdef DEBUG_DISAS
645
}
567
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
646
@@ -XXX,XX +XXX,XX @@ static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
568
- && qemu_log_in_addr_range(tb->pc))) {
647
{
569
+ && qemu_log_in_addr_range(pc_start))) {
648
const TCGLifeData arg_life = op->life;
570
FILE *logfile = qemu_log_trylock();
649
TCGTemp *ots, *itsl, *itsh;
571
if (logfile) {
650
- TCGType vtype = TCGOP_VECL(op) + TCG_TYPE_V64;
572
fprintf(logfile, "OP after optimization and liveness analysis:\n");
651
+ TCGType vtype = TCGOP_TYPE(op);
652
653
/* This opcode is only valid for 32-bit hosts, for 64-bit elements. */
654
tcg_debug_assert(TCG_TARGET_REG_BITS == 32);
655
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
656
index XXXXXXX..XXXXXXX 100644
657
--- a/docs/devel/tcg-ops.rst
658
+++ b/docs/devel/tcg-ops.rst
659
@@ -XXX,XX +XXX,XX @@ QEMU specific operations
660
Host vector operations
661
----------------------
662
663
-All of the vector ops have two parameters, ``TCGOP_VECL`` & ``TCGOP_VECE``.
664
-The former specifies the length of the vector in log2 64-bit units; the
665
-latter specifies the length of the element (if applicable) in log2 8-bit units.
666
-E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32.
667
+All of the vector ops have two parameters, ``TCGOP_TYPE`` & ``TCGOP_VECE``.
668
+The former specifies the length of the vector as a TCGType; the latter
669
+specifies the length of the element (if applicable) in log2 8-bit units.
670
671
.. list-table::
672
673
@@ -XXX,XX +XXX,XX @@ E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32.
674
675
* - dup_vec *v0*, *r1*
676
677
- - | Duplicate the low N bits of *r1* into VECL/VECE copies across *v0*.
678
+ - | Duplicate the low N bits of *r1* into TYPE/VECE copies across *v0*.
679
680
* - dupi_vec *v0*, *c*
681
682
@@ -XXX,XX +XXX,XX @@ E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32.
683
684
* - dup2_vec *v0*, *r1*, *r2*
685
686
- - | Duplicate *r2*:*r1* into VECL/64 copies across *v0*. This opcode is
687
+ - | Duplicate *r2*:*r1* into TYPE/64 copies across *v0*. This opcode is
688
only present for 32-bit hosts.
689
690
* - add_vec *v0*, *v1*, *v2*
691
@@ -XXX,XX +XXX,XX @@ E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32.
692
693
.. code-block:: c
694
695
- for (i = 0; i < VECL/VECE; ++i) {
696
+ for (i = 0; i < TYPE/VECE; ++i) {
697
v0[i] = v1[i] << s2;
698
}
699
700
@@ -XXX,XX +XXX,XX @@ E.g. VECL = 1 -> 64 << 1 -> v128, and VECE = 2 -> 1 << 2 -> i32.
701
702
.. code-block:: c
703
704
- for (i = 0; i < VECL/VECE; ++i) {
705
+ for (i = 0; i < TYPE/VECE; ++i) {
706
v0[i] = v1[i] << v2[i];
707
}
708
573
--
709
--
574
2.34.1
710
2.43.0
575
711
576
712
diff view generated by jsdifflib
New patch
1
These are not particularly useful outside of optimization passes.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/tcg/tcg.h | 4 ----
7
tcg/tcg-internal.h | 5 +++++
8
2 files changed, 5 insertions(+), 4 deletions(-)
9
10
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/include/tcg/tcg.h
13
+++ b/include/tcg/tcg.h
14
@@ -XXX,XX +XXX,XX @@ void tcg_gen_call7(void *func, TCGHelperInfo *, TCGTemp *ret,
15
16
TCGOp *tcg_emit_op(TCGOpcode opc, unsigned nargs);
17
void tcg_op_remove(TCGContext *s, TCGOp *op);
18
-TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op,
19
- TCGOpcode opc, unsigned nargs);
20
-TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op,
21
- TCGOpcode opc, unsigned nargs);
22
23
/**
24
* tcg_remove_ops_after:
25
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
26
index XXXXXXX..XXXXXXX 100644
27
--- a/tcg/tcg-internal.h
28
+++ b/tcg/tcg-internal.h
29
@@ -XXX,XX +XXX,XX @@ void vec_gen_4(TCGOpcode, TCGType, unsigned, TCGArg, TCGArg, TCGArg, TCGArg);
30
void vec_gen_6(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r,
31
TCGArg a, TCGArg b, TCGArg c, TCGArg d, TCGArg e);
32
33
+TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op,
34
+ TCGOpcode opc, unsigned nargs);
35
+TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op,
36
+ TCGOpcode opc, unsigned nargs);
37
+
38
#endif /* TCG_INTERNAL_H */
39
--
40
2.43.0
41
42
diff view generated by jsdifflib
New patch
1
Simplify use within the optimizers by defaulting the
2
new opcode to the same type as the old opcode.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/tcg.c | 4 ++++
8
1 file changed, 4 insertions(+)
9
10
diff --git a/tcg/tcg.c b/tcg/tcg.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/tcg.c
13
+++ b/tcg/tcg.c
14
@@ -XXX,XX +XXX,XX @@ TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *old_op,
15
TCGOpcode opc, unsigned nargs)
16
{
17
TCGOp *new_op = tcg_op_alloc(opc, nargs);
18
+
19
+ TCGOP_TYPE(new_op) = TCGOP_TYPE(old_op);
20
QTAILQ_INSERT_BEFORE(old_op, new_op, link);
21
return new_op;
22
}
23
@@ -XXX,XX +XXX,XX @@ TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *old_op,
24
TCGOpcode opc, unsigned nargs)
25
{
26
TCGOp *new_op = tcg_op_alloc(opc, nargs);
27
+
28
+ TCGOP_TYPE(new_op) = TCGOP_TYPE(old_op);
29
QTAILQ_INSERT_AFTER(&s->ops, old_op, new_op, link);
30
return new_op;
31
}
32
--
33
2.43.0
34
35
diff view generated by jsdifflib
New patch
1
To be used by some integer operations instead of,
2
or in addition to, a trailing constant argument.
1
3
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/tcg/tcg.h | 1 +
7
1 file changed, 1 insertion(+)
8
9
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
10
index XXXXXXX..XXXXXXX 100644
11
--- a/include/tcg/tcg.h
12
+++ b/include/tcg/tcg.h
13
@@ -XXX,XX +XXX,XX @@ struct TCGOp {
14
#define TCGOP_CALLO(X) (X)->param2
15
16
#define TCGOP_TYPE(X) (X)->param1
17
+#define TCGOP_FLAGS(X) (X)->param2
18
#define TCGOP_VECE(X) (X)->param2
19
20
/* Make sure operands fit in the bitfields above. */
21
--
22
2.43.0
diff view generated by jsdifflib
1
From: Alex Bennée <alex.bennee@linaro.org>
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
3
The class cast checkers are quite expensive and always on (unlike the
4
dynamic case who's checks are gated by CONFIG_QOM_CAST_DEBUG). To
5
avoid the overhead of repeatedly checking something which should never
6
change we cache the CPUClass reference for use in the hot code paths.
7
8
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-Id: <20220811151413.3350684-3-alex.bennee@linaro.org>
11
Signed-off-by: Cédric Le Goater <clg@kaod.org>
12
Message-Id: <20220923084803.498337-3-clg@kaod.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
3
---
15
include/hw/core/cpu.h | 9 +++++++++
4
include/tcg/tcg.h | 7 ++++++-
16
cpu.c | 9 ++++-----
5
tcg/tcg.c | 11 +++++++----
17
2 files changed, 13 insertions(+), 5 deletions(-)
6
2 files changed, 13 insertions(+), 5 deletions(-)
18
7
19
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
8
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
20
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
21
--- a/include/hw/core/cpu.h
10
--- a/include/tcg/tcg.h
22
+++ b/include/hw/core/cpu.h
11
+++ b/include/tcg/tcg.h
23
@@ -XXX,XX +XXX,XX @@ typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
12
@@ -XXX,XX +XXX,XX @@ typedef struct TCGTargetOpDef {
24
*/
13
const char *args_ct_str[TCG_MAX_OP_ARGS];
25
#define CPU(obj) ((CPUState *)(obj))
14
} TCGTargetOpDef;
26
15
16
-bool tcg_op_supported(TCGOpcode op);
27
+/*
17
+/*
28
+ * The class checkers bring in CPU_GET_CLASS() which is potentially
18
+ * tcg_op_supported:
29
+ * expensive given the eventual call to
19
+ * Query if @op, for @type and @flags, is supported by the host
30
+ * object_class_dynamic_cast_assert(). Because of this the CPUState
20
+ * on which we are currently executing.
31
+ * has a cached value for the class in cs->cc which is set up in
32
+ * cpu_exec_realizefn() for use in hot code paths.
33
+ */
21
+ */
34
typedef struct CPUClass CPUClass;
22
+bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags);
35
DECLARE_CLASS_CHECKERS(CPUClass, CPU,
23
36
TYPE_CPU)
24
void tcg_gen_call0(void *func, TCGHelperInfo *, TCGTemp *ret);
37
@@ -XXX,XX +XXX,XX @@ struct qemu_work_item;
25
void tcg_gen_call1(void *func, TCGHelperInfo *, TCGTemp *ret, TCGTemp *);
38
struct CPUState {
26
diff --git a/tcg/tcg.c b/tcg/tcg.c
39
/*< private >*/
40
DeviceState parent_obj;
41
+ /* cache to avoid expensive CPU_GET_CLASS */
42
+ CPUClass *cc;
43
/*< public >*/
44
45
int nr_cores;
46
diff --git a/cpu.c b/cpu.c
47
index XXXXXXX..XXXXXXX 100644
27
index XXXXXXX..XXXXXXX 100644
48
--- a/cpu.c
28
--- a/tcg/tcg.c
49
+++ b/cpu.c
29
+++ b/tcg/tcg.c
50
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_cpu_common = {
30
@@ -XXX,XX +XXX,XX @@ TCGTemp *tcgv_i32_temp(TCGv_i32 v)
51
31
}
52
void cpu_exec_realizefn(CPUState *cpu, Error **errp)
32
#endif /* CONFIG_DEBUG_TCG */
33
34
-/* Return true if OP may appear in the opcode stream.
35
- Test the runtime variable that controls each opcode. */
36
-bool tcg_op_supported(TCGOpcode op)
37
+/*
38
+ * Return true if OP may appear in the opcode stream with TYPE.
39
+ * Test the runtime variable that controls each opcode.
40
+ */
41
+bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
53
{
42
{
54
-#ifndef CONFIG_USER_ONLY
43
const bool have_vec
55
- CPUClass *cc = CPU_GET_CLASS(cpu);
44
= TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;
56
-#endif
45
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
57
+ /* cache the cpu class for the hotpath */
46
/* fall through */
58
+ cpu->cc = CPU_GET_CLASS(cpu);
47
default:
59
48
/* Sanity check that we've not introduced any unhandled opcodes. */
60
cpu_list_add(cpu);
49
- tcg_debug_assert(tcg_op_supported(opc));
61
if (!accel_cpu_realizefn(cpu, errp)) {
50
+ tcg_debug_assert(tcg_op_supported(opc, TCGOP_TYPE(op),
62
@@ -XXX,XX +XXX,XX @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp)
51
+ TCGOP_FLAGS(op)));
63
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
52
/* Note: in order to speed up the code, it would be much
64
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
53
faster to have specialized register allocator functions for
65
}
54
some common argument patterns */
66
- if (cc->sysemu_ops->legacy_vmsd != NULL) {
67
- vmstate_register(NULL, cpu->cpu_index, cc->sysemu_ops->legacy_vmsd, cpu);
68
+ if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
69
+ vmstate_register(NULL, cpu->cpu_index, cpu->cc->sysemu_ops->legacy_vmsd, cpu);
70
}
71
#endif /* CONFIG_USER_ONLY */
72
}
73
--
55
--
74
2.34.1
56
2.43.0
75
57
76
58
diff view generated by jsdifflib
1
From: Alex Bennée <alex.bennee@linaro.org>
1
Rely on tcg-op-vec.c to expand the opcode if missing.
2
2
3
Before: 35.912 s ± 0.168 s
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
After: 35.565 s ± 0.087 s
5
6
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-Id: <20220811151413.3350684-5-alex.bennee@linaro.org>
9
Signed-off-by: Cédric Le Goater <clg@kaod.org>
10
Message-Id: <20220923084803.498337-5-clg@kaod.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
5
---
13
accel/tcg/cputlb.c | 15 ++++++---------
6
target/arm/tcg/translate-sve.c | 20 ++++----------------
14
1 file changed, 6 insertions(+), 9 deletions(-)
7
1 file changed, 4 insertions(+), 16 deletions(-)
15
8
16
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
9
diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c
17
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
18
--- a/accel/tcg/cputlb.c
11
--- a/target/arm/tcg/translate-sve.c
19
+++ b/accel/tcg/cputlb.c
12
+++ b/target/arm/tcg/translate-sve.c
20
@@ -XXX,XX +XXX,XX @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
13
@@ -XXX,XX +XXX,XX @@ static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
21
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
14
static void gen_bsl1n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
22
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
15
TCGv_vec m, TCGv_vec k)
23
{
16
{
24
- CPUClass *cc = CPU_GET_CLASS(cpu);
17
- if (TCG_TARGET_HAS_bitsel_vec) {
25
bool ok;
18
- tcg_gen_not_vec(vece, n, n);
26
19
- tcg_gen_bitsel_vec(vece, d, k, n, m);
27
/*
20
- } else {
28
* This is not a probe, so only valid return is success; failure
21
- tcg_gen_andc_vec(vece, n, k, n);
29
* should result in exception + longjmp to the cpu loop.
22
- tcg_gen_andc_vec(vece, m, m, k);
30
*/
23
- tcg_gen_or_vec(vece, d, n, m);
31
- ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
24
- }
32
- access_type, mmu_idx, false, retaddr);
25
+ tcg_gen_not_vec(vece, n, n);
33
+ ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
26
+ tcg_gen_bitsel_vec(vece, d, k, n, m);
34
+ access_type, mmu_idx, false, retaddr);
35
assert(ok);
36
}
27
}
37
28
38
@@ -XXX,XX +XXX,XX @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
29
static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
39
MMUAccessType access_type,
30
@@ -XXX,XX +XXX,XX @@ static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
40
int mmu_idx, uintptr_t retaddr)
31
static void gen_bsl2n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
32
TCGv_vec m, TCGv_vec k)
41
{
33
{
42
- CPUClass *cc = CPU_GET_CLASS(cpu);
34
- if (TCG_TARGET_HAS_bitsel_vec) {
43
-
35
- tcg_gen_not_vec(vece, m, m);
44
- cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
36
- tcg_gen_bitsel_vec(vece, d, k, n, m);
45
+ cpu->cc->tcg_ops->do_unaligned_access(cpu, addr, access_type,
37
- } else {
46
+ mmu_idx, retaddr);
38
- tcg_gen_and_vec(vece, n, n, k);
39
- tcg_gen_or_vec(vece, m, m, k);
40
- tcg_gen_orc_vec(vece, d, n, m);
41
- }
42
+ tcg_gen_not_vec(vece, m, m);
43
+ tcg_gen_bitsel_vec(vece, d, k, n, m);
47
}
44
}
48
45
49
static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
46
static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
50
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
51
if (!tlb_hit_page(tlb_addr, page_addr)) {
52
if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
53
CPUState *cs = env_cpu(env);
54
- CPUClass *cc = CPU_GET_CLASS(cs);
55
56
- if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
57
- mmu_idx, nonfault, retaddr)) {
58
+ if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
59
+ mmu_idx, nonfault, retaddr)) {
60
/* Non-faulting page table read failed. */
61
*phost = NULL;
62
return TLB_INVALID_MASK;
63
--
47
--
64
2.34.1
48
2.43.0
65
49
66
50
diff view generated by jsdifflib
New patch
1
Do not reference TCG_TARGET_HAS_* directly.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/arm/tcg/translate-a64.c | 10 ++++++----
7
target/arm/tcg/translate-sve.c | 2 +-
8
target/arm/tcg/translate.c | 2 +-
9
3 files changed, 8 insertions(+), 6 deletions(-)
10
11
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/arm/tcg/translate-a64.c
14
+++ b/target/arm/tcg/translate-a64.c
15
@@ -XXX,XX +XXX,XX @@ static bool trans_CCMP(DisasContext *s, arg_CCMP *a)
16
TCGv_i64 tcg_rn, tcg_y;
17
DisasCompare c;
18
unsigned nzcv;
19
+ bool has_andc;
20
21
/* Set T0 = !COND. */
22
arm_test_cc(&c, a->cond);
23
@@ -XXX,XX +XXX,XX @@ static bool trans_CCMP(DisasContext *s, arg_CCMP *a)
24
tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
25
26
nzcv = a->nzcv;
27
+ has_andc = tcg_op_supported(INDEX_op_andc_i32, TCG_TYPE_I32, 0);
28
if (nzcv & 8) { /* N */
29
tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
30
} else {
31
- if (TCG_TARGET_HAS_andc_i32) {
32
+ if (has_andc) {
33
tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
34
} else {
35
tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
36
}
37
}
38
if (nzcv & 4) { /* Z */
39
- if (TCG_TARGET_HAS_andc_i32) {
40
+ if (has_andc) {
41
tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
42
} else {
43
tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
44
@@ -XXX,XX +XXX,XX @@ static bool trans_CCMP(DisasContext *s, arg_CCMP *a)
45
if (nzcv & 2) { /* C */
46
tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
47
} else {
48
- if (TCG_TARGET_HAS_andc_i32) {
49
+ if (has_andc) {
50
tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
51
} else {
52
tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
53
@@ -XXX,XX +XXX,XX @@ static bool trans_CCMP(DisasContext *s, arg_CCMP *a)
54
if (nzcv & 1) { /* V */
55
tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
56
} else {
57
- if (TCG_TARGET_HAS_andc_i32) {
58
+ if (has_andc) {
59
tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
60
} else {
61
tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
62
diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c
63
index XXXXXXX..XXXXXXX 100644
64
--- a/target/arm/tcg/translate-sve.c
65
+++ b/target/arm/tcg/translate-sve.c
66
@@ -XXX,XX +XXX,XX @@ static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
67
* = | ~(m | k)
68
*/
69
tcg_gen_and_i64(n, n, k);
70
- if (TCG_TARGET_HAS_orc_i64) {
71
+ if (tcg_op_supported(INDEX_op_orc_i64, TCG_TYPE_I64, 0)) {
72
tcg_gen_or_i64(m, m, k);
73
tcg_gen_orc_i64(d, n, m);
74
} else {
75
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
76
index XXXXXXX..XXXXXXX 100644
77
--- a/target/arm/tcg/translate.c
78
+++ b/target/arm/tcg/translate.c
79
@@ -XXX,XX +XXX,XX @@ static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
80
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
81
{
82
TCGv_i32 tmp = tcg_temp_new_i32();
83
- if (TCG_TARGET_HAS_add2_i32) {
84
+ if (tcg_op_supported(INDEX_op_add2_i32, TCG_TYPE_I32, 0)) {
85
tcg_gen_movi_i32(tmp, 0);
86
tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
87
tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
88
--
89
2.43.0
90
91
diff view generated by jsdifflib
New patch
1
Do not reference TCG_TARGET_HAS_* directly.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/tricore/translate.c | 4 ++--
7
1 file changed, 2 insertions(+), 2 deletions(-)
8
9
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/tricore/translate.c
12
+++ b/target/tricore/translate.c
13
@@ -XXX,XX +XXX,XX @@ static void decode_bit_andacc(DisasContext *ctx)
14
pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl);
15
break;
16
case OPC2_32_BIT_AND_NOR_T:
17
- if (TCG_TARGET_HAS_andc_i32) {
18
+ if (tcg_op_supported(INDEX_op_andc_i32, TCG_TYPE_I32, 0)) {
19
gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
20
pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl);
21
} else {
22
@@ -XXX,XX +XXX,XX @@ static void decode_bit_orand(DisasContext *ctx)
23
pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl);
24
break;
25
case OPC2_32_BIT_OR_NOR_T:
26
- if (TCG_TARGET_HAS_orc_i32) {
27
+ if (tcg_op_supported(INDEX_op_orc_i32, TCG_TYPE_I32, 0)) {
28
gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
29
pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl);
30
} else {
31
--
32
2.43.0
33
34
diff view generated by jsdifflib
1
Prepare for targets to be able to produce TBs that can
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
run in more than one virtual context.
3
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
3
---
7
accel/tcg/internal.h | 4 +++
4
include/tcg/tcg.h | 6 ++++++
8
accel/tcg/tb-jmp-cache.h | 41 +++++++++++++++++++++++++
5
tcg/tcg.c | 21 +++++++++++++++++++++
9
include/exec/cpu-defs.h | 3 ++
6
2 files changed, 27 insertions(+)
10
include/exec/exec-all.h | 32 ++++++++++++++++++--
11
accel/tcg/cpu-exec.c | 16 ++++++----
12
accel/tcg/translate-all.c | 64 ++++++++++++++++++++++++++-------------
13
6 files changed, 131 insertions(+), 29 deletions(-)
14
7
15
diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
8
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
16
index XXXXXXX..XXXXXXX 100644
9
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/tcg/internal.h
10
--- a/include/tcg/tcg.h
18
+++ b/accel/tcg/internal.h
11
+++ b/include/tcg/tcg.h
19
@@ -XXX,XX +XXX,XX @@ void tb_htable_init(void);
12
@@ -XXX,XX +XXX,XX @@ typedef struct TCGTargetOpDef {
20
/* Return the current PC from CPU, which may be cached in TB. */
13
* on which we are currently executing.
21
static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
14
*/
22
{
15
bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags);
23
+#if TARGET_TB_PCREL
16
+/*
24
+ return cpu->cc->get_pc(cpu);
17
+ * tcg_op_deposit_valid:
25
+#else
18
+ * Query if a deposit into (ofs, len) is supported for @type by
26
return tb_pc(tb);
19
+ * the host on which we are currently executing.
27
+#endif
20
+ */
21
+bool tcg_op_deposit_valid(TCGType type, unsigned ofs, unsigned len);
22
23
void tcg_gen_call0(void *func, TCGHelperInfo *, TCGTemp *ret);
24
void tcg_gen_call1(void *func, TCGHelperInfo *, TCGTemp *ret, TCGTemp *);
25
diff --git a/tcg/tcg.c b/tcg/tcg.c
26
index XXXXXXX..XXXXXXX 100644
27
--- a/tcg/tcg.c
28
+++ b/tcg/tcg.c
29
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
30
}
28
}
31
}
29
32
30
#endif /* ACCEL_TCG_INTERNAL_H */
33
+bool tcg_op_deposit_valid(TCGType type, unsigned ofs, unsigned len)
31
diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/accel/tcg/tb-jmp-cache.h
34
+++ b/accel/tcg/tb-jmp-cache.h
35
@@ -XXX,XX +XXX,XX @@
36
37
/*
38
* Accessed in parallel; all accesses to 'tb' must be atomic.
39
+ * For TARGET_TB_PCREL, accesses to 'pc' must be protected by
40
+ * a load_acquire/store_release to 'tb'.
41
*/
42
struct CPUJumpCache {
43
struct {
44
TranslationBlock *tb;
45
+#if TARGET_TB_PCREL
46
+ target_ulong pc;
47
+#endif
48
} array[TB_JMP_CACHE_SIZE];
49
};
50
51
+static inline TranslationBlock *
52
+tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t hash)
53
+{
34
+{
54
+#if TARGET_TB_PCREL
35
+ tcg_debug_assert(len > 0);
55
+ /* Use acquire to ensure current load of pc from jc. */
36
+ switch (type) {
56
+ return qatomic_load_acquire(&jc->array[hash].tb);
37
+ case TCG_TYPE_I32:
57
+#else
38
+ tcg_debug_assert(ofs < 32);
58
+ /* Use rcu_read to ensure current load of pc from *tb. */
39
+ tcg_debug_assert(len <= 32);
59
+ return qatomic_rcu_read(&jc->array[hash].tb);
40
+ tcg_debug_assert(ofs + len <= 32);
60
+#endif
41
+ return TCG_TARGET_HAS_deposit_i32 &&
61
+}
42
+ TCG_TARGET_deposit_i32_valid(ofs, len);
62
+
43
+ case TCG_TYPE_I64:
63
+static inline target_ulong
44
+ tcg_debug_assert(ofs < 64);
64
+tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
45
+ tcg_debug_assert(len <= 64);
65
+{
46
+ tcg_debug_assert(ofs + len <= 64);
66
+#if TARGET_TB_PCREL
47
+ return TCG_TARGET_HAS_deposit_i64 &&
67
+ return jc->array[hash].pc;
48
+ TCG_TARGET_deposit_i64_valid(ofs, len);
68
+#else
49
+ default:
69
+ return tb_pc(tb);
50
+ g_assert_not_reached();
70
+#endif
71
+}
72
+
73
+static inline void
74
+tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
75
+ TranslationBlock *tb, target_ulong pc)
76
+{
77
+#if TARGET_TB_PCREL
78
+ jc->array[hash].pc = pc;
79
+ /* Use store_release on tb to ensure pc is written first. */
80
+ qatomic_store_release(&jc->array[hash].tb, tb);
81
+#else
82
+ /* Use the pc value already stored in tb->pc. */
83
+ qatomic_set(&jc->array[hash].tb, tb);
84
+#endif
85
+}
86
+
87
#endif /* ACCEL_TCG_TB_JMP_CACHE_H */
88
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
89
index XXXXXXX..XXXXXXX 100644
90
--- a/include/exec/cpu-defs.h
91
+++ b/include/exec/cpu-defs.h
92
@@ -XXX,XX +XXX,XX @@
93
# error TARGET_PAGE_BITS must be defined in cpu-param.h
94
# endif
95
#endif
96
+#ifndef TARGET_TB_PCREL
97
+# define TARGET_TB_PCREL 0
98
+#endif
99
100
#define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8)
101
102
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
103
index XXXXXXX..XXXXXXX 100644
104
--- a/include/exec/exec-all.h
105
+++ b/include/exec/exec-all.h
106
@@ -XXX,XX +XXX,XX @@ struct tb_tc {
107
};
108
109
struct TranslationBlock {
110
- target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
111
- target_ulong cs_base; /* CS base for this block */
112
+#if !TARGET_TB_PCREL
113
+ /*
114
+ * Guest PC corresponding to this block. This must be the true
115
+ * virtual address. Therefore e.g. x86 stores EIP + CS_BASE, and
116
+ * targets like Arm, MIPS, HP-PA, which reuse low bits for ISA or
117
+ * privilege, must store those bits elsewhere.
118
+ *
119
+ * If TARGET_TB_PCREL, the opcodes for the TranslationBlock are
120
+ * written such that the TB is associated only with the physical
121
+ * page and may be run in any virtual address context. In this case,
122
+ * PC must always be taken from ENV in a target-specific manner.
123
+ * Unwind information is taken as offsets from the page, to be
124
+ * deposited into the "current" PC.
125
+ */
126
+ target_ulong pc;
127
+#endif
128
+
129
+ /*
130
+ * Target-specific data associated with the TranslationBlock, e.g.:
131
+ * x86: the original user, the Code Segment virtual base,
132
+ * arm: an extension of tb->flags,
133
+ * s390x: instruction data for EXECUTE,
134
+ * sparc: the next pc of the instruction queue (for delay slots).
135
+ */
136
+ target_ulong cs_base;
137
+
138
uint32_t flags; /* flags defining in which context the code was generated */
139
uint32_t cflags; /* compile flags */
140
141
@@ -XXX,XX +XXX,XX @@ struct TranslationBlock {
142
/* Hide the read to avoid ifdefs for TARGET_TB_PCREL. */
143
static inline target_ulong tb_pc(const TranslationBlock *tb)
144
{
145
+#if TARGET_TB_PCREL
146
+ qemu_build_not_reached();
147
+#else
148
return tb->pc;
149
+#endif
150
}
151
152
/* Hide the qatomic_read to make code a little easier on the eyes */
153
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
154
index XXXXXXX..XXXXXXX 100644
155
--- a/accel/tcg/cpu-exec.c
156
+++ b/accel/tcg/cpu-exec.c
157
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
158
const TranslationBlock *tb = p;
159
const struct tb_desc *desc = d;
160
161
- if (tb_pc(tb) == desc->pc &&
162
+ if ((TARGET_TB_PCREL || tb_pc(tb) == desc->pc) &&
163
tb->page_addr[0] == desc->page_addr0 &&
164
tb->cs_base == desc->cs_base &&
165
tb->flags == desc->flags &&
166
@@ -XXX,XX +XXX,XX @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
167
return NULL;
168
}
169
desc.page_addr0 = phys_pc;
170
- h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
171
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : pc),
172
+ flags, cflags, *cpu->trace_dstate);
173
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
174
}
175
176
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
177
uint32_t flags, uint32_t cflags)
178
{
179
TranslationBlock *tb;
180
+ CPUJumpCache *jc;
181
uint32_t hash;
182
183
/* we should never be trying to look up an INVALID tb */
184
tcg_debug_assert(!(cflags & CF_INVALID));
185
186
hash = tb_jmp_cache_hash_func(pc);
187
- tb = qatomic_rcu_read(&cpu->tb_jmp_cache->array[hash].tb);
188
+ jc = cpu->tb_jmp_cache;
189
+ tb = tb_jmp_cache_get_tb(jc, hash);
190
191
if (likely(tb &&
192
- tb->pc == pc &&
193
+ tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
194
tb->cs_base == cs_base &&
195
tb->flags == flags &&
196
tb->trace_vcpu_dstate == *cpu->trace_dstate &&
197
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
198
if (tb == NULL) {
199
return NULL;
200
}
201
- qatomic_set(&cpu->tb_jmp_cache->array[hash].tb, tb);
202
+ tb_jmp_cache_set(jc, hash, tb, pc);
203
return tb;
204
}
205
206
@@ -XXX,XX +XXX,XX @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
207
if (cc->tcg_ops->synchronize_from_tb) {
208
cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
209
} else {
210
+ assert(!TARGET_TB_PCREL);
211
assert(cc->set_pc);
212
cc->set_pc(cpu, tb_pc(last_tb));
213
}
214
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
215
* for the fast lookup
216
*/
217
h = tb_jmp_cache_hash_func(pc);
218
- qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
219
+ tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
220
}
221
222
#ifndef CONFIG_USER_ONLY
223
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
224
index XXXXXXX..XXXXXXX 100644
225
--- a/accel/tcg/translate-all.c
226
+++ b/accel/tcg/translate-all.c
227
@@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
228
229
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
230
if (i == 0) {
231
- prev = (j == 0 ? tb_pc(tb) : 0);
232
+ prev = (!TARGET_TB_PCREL && j == 0 ? tb_pc(tb) : 0);
233
} else {
234
prev = tcg_ctx->gen_insn_data[i - 1][j];
235
}
236
@@ -XXX,XX +XXX,XX @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
237
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
238
uintptr_t searched_pc, bool reset_icount)
239
{
240
- target_ulong data[TARGET_INSN_START_WORDS] = { tb_pc(tb) };
241
+ target_ulong data[TARGET_INSN_START_WORDS];
242
uintptr_t host_pc = (uintptr_t)tb->tc.ptr;
243
CPUArchState *env = cpu->env_ptr;
244
const uint8_t *p = tb->tc.ptr + tb->tc.size;
245
@@ -XXX,XX +XXX,XX @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
246
return -1;
247
}
248
249
+ memset(data, 0, sizeof(data));
250
+ if (!TARGET_TB_PCREL) {
251
+ data[0] = tb_pc(tb);
252
+ }
253
+
254
/* Reconstruct the stored insn data while looking for the point at
255
which the end of the insn exceeds the searched_pc. */
256
for (i = 0; i < num_insns; ++i) {
257
@@ -XXX,XX +XXX,XX @@ static bool tb_cmp(const void *ap, const void *bp)
258
const TranslationBlock *a = ap;
259
const TranslationBlock *b = bp;
260
261
- return tb_pc(a) == tb_pc(b) &&
262
- a->cs_base == b->cs_base &&
263
- a->flags == b->flags &&
264
- (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
265
- a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
266
- a->page_addr[0] == b->page_addr[0] &&
267
- a->page_addr[1] == b->page_addr[1];
268
+ return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
269
+ a->cs_base == b->cs_base &&
270
+ a->flags == b->flags &&
271
+ (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
272
+ a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
273
+ a->page_addr[0] == b->page_addr[0] &&
274
+ a->page_addr[1] == b->page_addr[1]);
275
}
276
277
void tb_htable_init(void)
278
@@ -XXX,XX +XXX,XX @@ static inline void tb_jmp_unlink(TranslationBlock *dest)
279
qemu_spin_unlock(&dest->jmp_lock);
280
}
281
282
+static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
283
+{
284
+ CPUState *cpu;
285
+
286
+ if (TARGET_TB_PCREL) {
287
+ /* A TB may be at any virtual address */
288
+ CPU_FOREACH(cpu) {
289
+ tcg_flush_jmp_cache(cpu);
290
+ }
291
+ } else {
292
+ uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));
293
+
294
+ CPU_FOREACH(cpu) {
295
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
296
+
297
+ if (qatomic_read(&jc->array[h].tb) == tb) {
298
+ qatomic_set(&jc->array[h].tb, NULL);
299
+ }
300
+ }
301
+ }
51
+ }
302
+}
52
+}
303
+
53
+
304
/*
54
static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);
305
* In user-mode, call with mmap_lock held.
55
306
* In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
56
static void tcg_gen_callN(void *func, TCGHelperInfo *info,
307
@@ -XXX,XX +XXX,XX @@ static inline void tb_jmp_unlink(TranslationBlock *dest)
308
*/
309
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
310
{
311
- CPUState *cpu;
312
PageDesc *p;
313
uint32_t h;
314
tb_page_addr_t phys_pc;
315
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
316
317
/* remove the TB from the hash list */
318
phys_pc = tb->page_addr[0];
319
- h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, orig_cflags,
320
- tb->trace_vcpu_dstate);
321
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
322
+ tb->flags, orig_cflags, tb->trace_vcpu_dstate);
323
if (!qht_remove(&tb_ctx.htable, tb, h)) {
324
return;
325
}
326
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
327
}
328
329
/* remove the TB from the hash list */
330
- h = tb_jmp_cache_hash_func(tb->pc);
331
- CPU_FOREACH(cpu) {
332
- CPUJumpCache *jc = cpu->tb_jmp_cache;
333
- if (qatomic_read(&jc->array[h].tb) == tb) {
334
- qatomic_set(&jc->array[h].tb, NULL);
335
- }
336
- }
337
+ tb_jmp_cache_inval_tb(tb);
338
339
/* suppress this TB from the two jump lists */
340
tb_remove_from_jmp_list(tb, 0);
341
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
342
}
343
344
/* add in the hash table */
345
- h = tb_hash_func(phys_pc, tb_pc(tb), tb->flags, tb->cflags,
346
- tb->trace_vcpu_dstate);
347
+ h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
348
+ tb->flags, tb->cflags, tb->trace_vcpu_dstate);
349
qht_insert(&tb_ctx.htable, tb, h, &existing_tb);
350
351
/* remove TB from the page(s) if we couldn't insert it */
352
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
353
354
gen_code_buf = tcg_ctx->code_gen_ptr;
355
tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
356
+#if !TARGET_TB_PCREL
357
tb->pc = pc;
358
+#endif
359
tb->cs_base = cs_base;
360
tb->flags = flags;
361
tb->cflags = cflags;
362
--
57
--
363
2.34.1
58
2.43.0
364
59
365
60
diff view generated by jsdifflib
New patch
1
This macro is unused.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/i386/tcg/emit.c.inc | 2 --
7
1 file changed, 2 deletions(-)
8
9
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/i386/tcg/emit.c.inc
12
+++ b/target/i386/tcg/emit.c.inc
13
@@ -XXX,XX +XXX,XX @@
14
#ifdef TARGET_X86_64
15
#define TCG_TARGET_HAS_extract2_tl TCG_TARGET_HAS_extract2_i64
16
#define TCG_TARGET_deposit_tl_valid TCG_TARGET_deposit_i64_valid
17
-#define TCG_TARGET_extract_tl_valid TCG_TARGET_extract_i64_valid
18
#else
19
#define TCG_TARGET_HAS_extract2_tl TCG_TARGET_HAS_extract2_i32
20
#define TCG_TARGET_deposit_tl_valid TCG_TARGET_deposit_i32_valid
21
-#define TCG_TARGET_extract_tl_valid TCG_TARGET_extract_i32_valid
22
#endif
23
24
#define MMX_OFFSET(reg) \
25
--
26
2.43.0
27
28
diff view generated by jsdifflib
New patch
1
Avoid direct usage of TCG_TARGET_deposit_*_valid.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/i386/tcg/emit.c.inc | 6 ++----
7
1 file changed, 2 insertions(+), 4 deletions(-)
8
9
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/i386/tcg/emit.c.inc
12
+++ b/target/i386/tcg/emit.c.inc
13
@@ -XXX,XX +XXX,XX @@
14
*/
15
#ifdef TARGET_X86_64
16
#define TCG_TARGET_HAS_extract2_tl TCG_TARGET_HAS_extract2_i64
17
-#define TCG_TARGET_deposit_tl_valid TCG_TARGET_deposit_i64_valid
18
#else
19
#define TCG_TARGET_HAS_extract2_tl TCG_TARGET_HAS_extract2_i32
20
-#define TCG_TARGET_deposit_tl_valid TCG_TARGET_deposit_i32_valid
21
#endif
22
23
#define MMX_OFFSET(reg) \
24
@@ -XXX,XX +XXX,XX @@ static void gen_RCL(DisasContext *s, X86DecodedInsn *decode)
25
}
26
27
/* Compute high part, including incoming carry. */
28
- if (!have_1bit_cin || TCG_TARGET_deposit_tl_valid(1, TARGET_LONG_BITS - 1)) {
29
+ if (!have_1bit_cin || tcg_op_deposit_valid(TCG_TYPE_TL, 1, TARGET_LONG_BITS - 1)) {
30
/* high = (T0 << 1) | cin */
31
TCGv cin = have_1bit_cin ? decode->cc_dst : decode->cc_src;
32
tcg_gen_deposit_tl(high, cin, s->T0, 1, TARGET_LONG_BITS - 1);
33
@@ -XXX,XX +XXX,XX @@ static void gen_RCR(DisasContext *s, X86DecodedInsn *decode)
34
}
35
36
/* Save incoming carry into high, it will be shifted later. */
37
- if (!have_1bit_cin || TCG_TARGET_deposit_tl_valid(1, TARGET_LONG_BITS - 1)) {
38
+ if (!have_1bit_cin || tcg_op_deposit_valid(TCG_TYPE_TL, 1, TARGET_LONG_BITS - 1)) {
39
TCGv cin = have_1bit_cin ? decode->cc_dst : decode->cc_src;
40
tcg_gen_deposit_tl(high, cin, s->T0, 1, TARGET_LONG_BITS - 1);
41
} else {
42
--
43
2.43.0
44
45
diff view generated by jsdifflib
New patch
1
Do not reference TCG_TARGET_HAS_* directly.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
target/i386/tcg/emit.c.inc | 6 +++---
7
1 file changed, 3 insertions(+), 3 deletions(-)
8
9
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
10
index XXXXXXX..XXXXXXX 100644
11
--- a/target/i386/tcg/emit.c.inc
12
+++ b/target/i386/tcg/emit.c.inc
13
@@ -XXX,XX +XXX,XX @@
14
* The exact opcode to check depends on 32- vs. 64-bit.
15
*/
16
#ifdef TARGET_X86_64
17
-#define TCG_TARGET_HAS_extract2_tl TCG_TARGET_HAS_extract2_i64
18
+#define INDEX_op_extract2_tl INDEX_op_extract2_i64
19
#else
20
-#define TCG_TARGET_HAS_extract2_tl TCG_TARGET_HAS_extract2_i32
21
+#define INDEX_op_extract2_tl INDEX_op_extract2_i32
22
#endif
23
24
#define MMX_OFFSET(reg) \
25
@@ -XXX,XX +XXX,XX @@ static void gen_PMOVMSKB(DisasContext *s, X86DecodedInsn *decode)
26
tcg_gen_ld8u_tl(s->T0, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1)));
27
while (vec_len > 8) {
28
vec_len -= 8;
29
- if (TCG_TARGET_HAS_extract2_tl) {
30
+ if (tcg_op_supported(INDEX_op_extract2_tl, TCG_TYPE_TL, 0)) {
31
/*
32
* Load the next byte of the result into the high byte of T.
33
* TCG does a similar expansion of deposit to shl+extract2; by
34
--
35
2.43.0
36
37
diff view generated by jsdifflib
1
Allow the target to cache items from the guest page tables.
1
Make these features unconditional, as they're used by most
2
tcg backends anyway. Merge tcg-ldst.c.inc and tcg-pool.c.inc
3
into tcg.c and mark some of the functions unused, so that
4
when the features are not used we won't get Werrors.
2
5
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
---
8
include/exec/cpu-defs.h | 9 +++++++++
9
include/tcg/tcg.h | 4 -
9
1 file changed, 9 insertions(+)
10
tcg/aarch64/tcg-target.h | 2 -
11
tcg/arm/tcg-target.h | 2 -
12
tcg/i386/tcg-target.h | 2 -
13
tcg/loongarch64/tcg-target.h | 2 -
14
tcg/mips/tcg-target.h | 2 -
15
tcg/ppc/tcg-target.h | 2 -
16
tcg/riscv/tcg-target.h | 3 -
17
tcg/s390x/tcg-target.h | 2 -
18
tcg/sparc64/tcg-target.h | 2 -
19
tcg/tcg.c | 211 +++++++++++++++++++++++++++++--
20
tcg/aarch64/tcg-target.c.inc | 2 -
21
tcg/arm/tcg-target.c.inc | 2 -
22
tcg/i386/tcg-target.c.inc | 3 -
23
tcg/loongarch64/tcg-target.c.inc | 9 +-
24
tcg/mips/tcg-target.c.inc | 3 -
25
tcg/ppc/tcg-target.c.inc | 2 -
26
tcg/riscv/tcg-target.c.inc | 3 -
27
tcg/s390x/tcg-target.c.inc | 2 -
28
tcg/sparc64/tcg-target.c.inc | 3 -
29
tcg/tcg-ldst.c.inc | 65 ----------
30
tcg/tcg-pool.c.inc | 162 ------------------------
31
tcg/tci/tcg-target.c.inc | 12 +-
32
23 files changed, 216 insertions(+), 286 deletions(-)
33
delete mode 100644 tcg/tcg-ldst.c.inc
34
delete mode 100644 tcg/tcg-pool.c.inc
10
35
11
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
36
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
12
index XXXXXXX..XXXXXXX 100644
37
index XXXXXXX..XXXXXXX 100644
13
--- a/include/exec/cpu-defs.h
38
--- a/include/tcg/tcg.h
14
+++ b/include/exec/cpu-defs.h
39
+++ b/include/tcg/tcg.h
15
@@ -XXX,XX +XXX,XX @@ typedef struct CPUTLBEntryFull {
40
@@ -XXX,XX +XXX,XX @@ struct TCGContext {
16
41
CPUState *cpu; /* *_trans */
17
/* @lg_page_size contains the log2 of the page size. */
42
18
uint8_t lg_page_size;
43
/* These structures are private to tcg-target.c.inc. */
44
-#ifdef TCG_TARGET_NEED_LDST_LABELS
45
QSIMPLEQ_HEAD(, TCGLabelQemuLdst) ldst_labels;
46
-#endif
47
-#ifdef TCG_TARGET_NEED_POOL_LABELS
48
struct TCGLabelPoolData *pool_labels;
49
-#endif
50
51
TCGLabel *exitreq_label;
52
53
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
54
index XXXXXXX..XXXXXXX 100644
55
--- a/tcg/aarch64/tcg-target.h
56
+++ b/tcg/aarch64/tcg-target.h
57
@@ -XXX,XX +XXX,XX @@ typedef enum {
58
#define TCG_TARGET_HAS_tst_vec 1
59
60
#define TCG_TARGET_DEFAULT_MO (0)
61
-#define TCG_TARGET_NEED_LDST_LABELS
62
-#define TCG_TARGET_NEED_POOL_LABELS
63
64
#endif /* AARCH64_TCG_TARGET_H */
65
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
66
index XXXXXXX..XXXXXXX 100644
67
--- a/tcg/arm/tcg-target.h
68
+++ b/tcg/arm/tcg-target.h
69
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
70
#define TCG_TARGET_HAS_tst_vec 1
71
72
#define TCG_TARGET_DEFAULT_MO (0)
73
-#define TCG_TARGET_NEED_LDST_LABELS
74
-#define TCG_TARGET_NEED_POOL_LABELS
75
76
#endif
77
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
78
index XXXXXXX..XXXXXXX 100644
79
--- a/tcg/i386/tcg-target.h
80
+++ b/tcg/i386/tcg-target.h
81
@@ -XXX,XX +XXX,XX @@ typedef enum {
82
#include "tcg/tcg-mo.h"
83
84
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
85
-#define TCG_TARGET_NEED_LDST_LABELS
86
-#define TCG_TARGET_NEED_POOL_LABELS
87
88
#endif
89
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
90
index XXXXXXX..XXXXXXX 100644
91
--- a/tcg/loongarch64/tcg-target.h
92
+++ b/tcg/loongarch64/tcg-target.h
93
@@ -XXX,XX +XXX,XX @@ typedef enum {
94
95
#define TCG_TARGET_DEFAULT_MO (0)
96
97
-#define TCG_TARGET_NEED_LDST_LABELS
98
-
99
#endif /* LOONGARCH_TCG_TARGET_H */
100
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
101
index XXXXXXX..XXXXXXX 100644
102
--- a/tcg/mips/tcg-target.h
103
+++ b/tcg/mips/tcg-target.h
104
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
105
#define TCG_TARGET_HAS_tst 0
106
107
#define TCG_TARGET_DEFAULT_MO 0
108
-#define TCG_TARGET_NEED_LDST_LABELS
109
-#define TCG_TARGET_NEED_POOL_LABELS
110
111
#endif
112
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
113
index XXXXXXX..XXXXXXX 100644
114
--- a/tcg/ppc/tcg-target.h
115
+++ b/tcg/ppc/tcg-target.h
116
@@ -XXX,XX +XXX,XX @@ typedef enum {
117
#define TCG_TARGET_HAS_tst_vec 0
118
119
#define TCG_TARGET_DEFAULT_MO (0)
120
-#define TCG_TARGET_NEED_LDST_LABELS
121
-#define TCG_TARGET_NEED_POOL_LABELS
122
123
#endif
124
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
125
index XXXXXXX..XXXXXXX 100644
126
--- a/tcg/riscv/tcg-target.h
127
+++ b/tcg/riscv/tcg-target.h
128
@@ -XXX,XX +XXX,XX @@ typedef enum {
129
130
#define TCG_TARGET_DEFAULT_MO (0)
131
132
-#define TCG_TARGET_NEED_LDST_LABELS
133
-#define TCG_TARGET_NEED_POOL_LABELS
134
-
135
#endif
136
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
137
index XXXXXXX..XXXXXXX 100644
138
--- a/tcg/s390x/tcg-target.h
139
+++ b/tcg/s390x/tcg-target.h
140
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
141
#define TCG_TARGET_HAS_tst_vec 0
142
143
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
144
-#define TCG_TARGET_NEED_LDST_LABELS
145
-#define TCG_TARGET_NEED_POOL_LABELS
146
147
#endif
148
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
149
index XXXXXXX..XXXXXXX 100644
150
--- a/tcg/sparc64/tcg-target.h
151
+++ b/tcg/sparc64/tcg-target.h
152
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
153
#define TCG_AREG0 TCG_REG_I0
154
155
#define TCG_TARGET_DEFAULT_MO (0)
156
-#define TCG_TARGET_NEED_LDST_LABELS
157
-#define TCG_TARGET_NEED_POOL_LABELS
158
159
#endif
160
diff --git a/tcg/tcg.c b/tcg/tcg.c
161
index XXXXXXX..XXXXXXX 100644
162
--- a/tcg/tcg.c
163
+++ b/tcg/tcg.c
164
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s);
165
static void tcg_target_qemu_prologue(TCGContext *s);
166
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
167
intptr_t value, intptr_t addend);
168
+static void tcg_out_nop_fill(tcg_insn_unit *p, int count);
169
+
170
+typedef struct TCGLabelQemuLdst TCGLabelQemuLdst;
171
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
172
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
173
174
/* The CIE and FDE header definitions will be common to all hosts. */
175
typedef struct {
176
@@ -XXX,XX +XXX,XX @@ typedef struct QEMU_PACKED {
177
DebugFrameFDEHeader fde;
178
} DebugFrameHeader;
179
180
-typedef struct TCGLabelQemuLdst {
181
+struct TCGLabelQemuLdst {
182
bool is_ld; /* qemu_ld: true, qemu_st: false */
183
MemOpIdx oi;
184
TCGType type; /* result type of a load */
185
@@ -XXX,XX +XXX,XX @@ typedef struct TCGLabelQemuLdst {
186
const tcg_insn_unit *raddr; /* addr of the next IR of qemu_ld/st IR */
187
tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */
188
QSIMPLEQ_ENTRY(TCGLabelQemuLdst) next;
189
-} TCGLabelQemuLdst;
190
+};
191
192
static void tcg_register_jit_int(const void *buf, size_t size,
193
const void *debug_frame,
194
@@ -XXX,XX +XXX,XX @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
195
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
196
static bool tcg_target_const_match(int64_t val, int ct,
197
TCGType type, TCGCond cond, int vece);
198
-#ifdef TCG_TARGET_NEED_LDST_LABELS
199
-static int tcg_out_ldst_finalize(TCGContext *s);
200
-#endif
201
202
#ifndef CONFIG_USER_ONLY
203
#define guest_base ({ qemu_build_not_reached(); (uintptr_t)0; })
204
@@ -XXX,XX +XXX,XX @@ static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1,
205
}
206
}
207
208
+/*
209
+ * Allocate a new TCGLabelQemuLdst entry.
210
+ */
211
+
212
+__attribute__((unused))
213
+static TCGLabelQemuLdst *new_ldst_label(TCGContext *s)
214
+{
215
+ TCGLabelQemuLdst *l = tcg_malloc(sizeof(*l));
216
+
217
+ memset(l, 0, sizeof(*l));
218
+ QSIMPLEQ_INSERT_TAIL(&s->ldst_labels, l, next);
219
+
220
+ return l;
221
+}
222
+
223
+/*
224
+ * Allocate new constant pool entries.
225
+ */
226
+
227
+typedef struct TCGLabelPoolData {
228
+ struct TCGLabelPoolData *next;
229
+ tcg_insn_unit *label;
230
+ intptr_t addend;
231
+ int rtype;
232
+ unsigned nlong;
233
+ tcg_target_ulong data[];
234
+} TCGLabelPoolData;
235
+
236
+static TCGLabelPoolData *new_pool_alloc(TCGContext *s, int nlong, int rtype,
237
+ tcg_insn_unit *label, intptr_t addend)
238
+{
239
+ TCGLabelPoolData *n = tcg_malloc(sizeof(TCGLabelPoolData)
240
+ + sizeof(tcg_target_ulong) * nlong);
241
+
242
+ n->label = label;
243
+ n->addend = addend;
244
+ n->rtype = rtype;
245
+ n->nlong = nlong;
246
+ return n;
247
+}
248
+
249
+static void new_pool_insert(TCGContext *s, TCGLabelPoolData *n)
250
+{
251
+ TCGLabelPoolData *i, **pp;
252
+ int nlong = n->nlong;
253
+
254
+ /* Insertion sort on the pool. */
255
+ for (pp = &s->pool_labels; (i = *pp) != NULL; pp = &i->next) {
256
+ if (nlong > i->nlong) {
257
+ break;
258
+ }
259
+ if (nlong < i->nlong) {
260
+ continue;
261
+ }
262
+ if (memcmp(n->data, i->data, sizeof(tcg_target_ulong) * nlong) >= 0) {
263
+ break;
264
+ }
265
+ }
266
+ n->next = *pp;
267
+ *pp = n;
268
+}
269
+
270
+/* The "usual" for generic integer code. */
271
+__attribute__((unused))
272
+static void new_pool_label(TCGContext *s, tcg_target_ulong d, int rtype,
273
+ tcg_insn_unit *label, intptr_t addend)
274
+{
275
+ TCGLabelPoolData *n = new_pool_alloc(s, 1, rtype, label, addend);
276
+ n->data[0] = d;
277
+ new_pool_insert(s, n);
278
+}
279
+
280
+/* For v64 or v128, depending on the host. */
281
+__attribute__((unused))
282
+static void new_pool_l2(TCGContext *s, int rtype, tcg_insn_unit *label,
283
+ intptr_t addend, tcg_target_ulong d0,
284
+ tcg_target_ulong d1)
285
+{
286
+ TCGLabelPoolData *n = new_pool_alloc(s, 2, rtype, label, addend);
287
+ n->data[0] = d0;
288
+ n->data[1] = d1;
289
+ new_pool_insert(s, n);
290
+}
291
+
292
+/* For v128 or v256, depending on the host. */
293
+__attribute__((unused))
294
+static void new_pool_l4(TCGContext *s, int rtype, tcg_insn_unit *label,
295
+ intptr_t addend, tcg_target_ulong d0,
296
+ tcg_target_ulong d1, tcg_target_ulong d2,
297
+ tcg_target_ulong d3)
298
+{
299
+ TCGLabelPoolData *n = new_pool_alloc(s, 4, rtype, label, addend);
300
+ n->data[0] = d0;
301
+ n->data[1] = d1;
302
+ n->data[2] = d2;
303
+ n->data[3] = d3;
304
+ new_pool_insert(s, n);
305
+}
306
+
307
+/* For v256, for 32-bit host. */
308
+__attribute__((unused))
309
+static void new_pool_l8(TCGContext *s, int rtype, tcg_insn_unit *label,
310
+ intptr_t addend, tcg_target_ulong d0,
311
+ tcg_target_ulong d1, tcg_target_ulong d2,
312
+ tcg_target_ulong d3, tcg_target_ulong d4,
313
+ tcg_target_ulong d5, tcg_target_ulong d6,
314
+ tcg_target_ulong d7)
315
+{
316
+ TCGLabelPoolData *n = new_pool_alloc(s, 8, rtype, label, addend);
317
+ n->data[0] = d0;
318
+ n->data[1] = d1;
319
+ n->data[2] = d2;
320
+ n->data[3] = d3;
321
+ n->data[4] = d4;
322
+ n->data[5] = d5;
323
+ n->data[6] = d6;
324
+ n->data[7] = d7;
325
+ new_pool_insert(s, n);
326
+}
327
+
328
+/*
329
+ * Generate TB finalization at the end of block
330
+ */
331
+
332
+static int tcg_out_ldst_finalize(TCGContext *s)
333
+{
334
+ TCGLabelQemuLdst *lb;
335
+
336
+ /* qemu_ld/st slow paths */
337
+ QSIMPLEQ_FOREACH(lb, &s->ldst_labels, next) {
338
+ if (lb->is_ld
339
+ ? !tcg_out_qemu_ld_slow_path(s, lb)
340
+ : !tcg_out_qemu_st_slow_path(s, lb)) {
341
+ return -2;
342
+ }
343
+
344
+ /*
345
+ * Test for (pending) buffer overflow. The assumption is that any
346
+ * one operation beginning below the high water mark cannot overrun
347
+ * the buffer completely. Thus we can test for overflow after
348
+ * generating code without having to check during generation.
349
+ */
350
+ if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
351
+ return -1;
352
+ }
353
+ }
354
+ return 0;
355
+}
356
+
357
+static int tcg_out_pool_finalize(TCGContext *s)
358
+{
359
+ TCGLabelPoolData *p = s->pool_labels;
360
+ TCGLabelPoolData *l = NULL;
361
+ void *a;
362
+
363
+ if (p == NULL) {
364
+ return 0;
365
+ }
19
+
366
+
20
+ /*
367
+ /*
21
+ * Allow target-specific additions to this structure.
368
+ * ??? Round up to qemu_icache_linesize, but then do not round
22
+ * This may be used to cache items from the guest cpu
369
+ * again when allocating the next TranslationBlock structure.
23
+ * page tables for later use by the implementation.
24
+ */
370
+ */
25
+#ifdef TARGET_PAGE_ENTRY_EXTRA
371
+ a = (void *)ROUND_UP((uintptr_t)s->code_ptr,
26
+ TARGET_PAGE_ENTRY_EXTRA
372
+ sizeof(tcg_target_ulong) * p->nlong);
27
+#endif
373
+ tcg_out_nop_fill(s->code_ptr, (tcg_insn_unit *)a - s->code_ptr);
28
} CPUTLBEntryFull;
374
+ s->data_gen_ptr = a;
375
+
376
+ for (; p != NULL; p = p->next) {
377
+ size_t size = sizeof(tcg_target_ulong) * p->nlong;
378
+ uintptr_t value;
379
+
380
+ if (!l || l->nlong != p->nlong || memcmp(l->data, p->data, size)) {
381
+ if (unlikely(a > s->code_gen_highwater)) {
382
+ return -1;
383
+ }
384
+ memcpy(a, p->data, size);
385
+ a += size;
386
+ l = p;
387
+ }
388
+
389
+ value = (uintptr_t)tcg_splitwx_to_rx(a) - size;
390
+ if (!patch_reloc(p->label, p->rtype, value, p->addend)) {
391
+ return -2;
392
+ }
393
+ }
394
+
395
+ s->code_ptr = a;
396
+ return 0;
397
+}
398
+
399
#define C_PFX1(P, A) P##A
400
#define C_PFX2(P, A, B) P##A##_##B
401
#define C_PFX3(P, A, B, C) P##A##_##B##_##C
402
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
403
s->code_ptr = s->code_buf;
404
s->data_gen_ptr = NULL;
405
406
-#ifdef TCG_TARGET_NEED_LDST_LABELS
407
QSIMPLEQ_INIT(&s->ldst_labels);
408
-#endif
409
-#ifdef TCG_TARGET_NEED_POOL_LABELS
410
s->pool_labels = NULL;
411
-#endif
412
413
start_words = s->insn_start_words;
414
s->gen_insn_data =
415
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
416
s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
417
418
/* Generate TB finalization at the end of block */
419
-#ifdef TCG_TARGET_NEED_LDST_LABELS
420
i = tcg_out_ldst_finalize(s);
421
if (i < 0) {
422
return i;
423
}
424
-#endif
425
-#ifdef TCG_TARGET_NEED_POOL_LABELS
426
i = tcg_out_pool_finalize(s);
427
if (i < 0) {
428
return i;
429
}
430
-#endif
431
if (!tcg_resolve_relocs(s)) {
432
return -2;
433
}
434
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
435
index XXXXXXX..XXXXXXX 100644
436
--- a/tcg/aarch64/tcg-target.c.inc
437
+++ b/tcg/aarch64/tcg-target.c.inc
438
@@ -XXX,XX +XXX,XX @@
439
* See the COPYING file in the top-level directory for details.
440
*/
441
442
-#include "../tcg-ldst.c.inc"
443
-#include "../tcg-pool.c.inc"
444
#include "qemu/bitops.h"
445
446
/* Used for function call generation. */
447
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
448
index XXXXXXX..XXXXXXX 100644
449
--- a/tcg/arm/tcg-target.c.inc
450
+++ b/tcg/arm/tcg-target.c.inc
451
@@ -XXX,XX +XXX,XX @@
452
*/
453
454
#include "elf.h"
455
-#include "../tcg-ldst.c.inc"
456
-#include "../tcg-pool.c.inc"
457
458
int arm_arch = __ARM_ARCH;
459
460
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
461
index XXXXXXX..XXXXXXX 100644
462
--- a/tcg/i386/tcg-target.c.inc
463
+++ b/tcg/i386/tcg-target.c.inc
464
@@ -XXX,XX +XXX,XX @@
465
* THE SOFTWARE.
466
*/
467
468
-#include "../tcg-ldst.c.inc"
469
-#include "../tcg-pool.c.inc"
470
-
471
/* Used for function call generation. */
472
#define TCG_TARGET_STACK_ALIGN 16
473
#if defined(_WIN64)
474
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
475
index XXXXXXX..XXXXXXX 100644
476
--- a/tcg/loongarch64/tcg-target.c.inc
477
+++ b/tcg/loongarch64/tcg-target.c.inc
478
@@ -XXX,XX +XXX,XX @@
479
* THE SOFTWARE.
480
*/
481
482
-#include "../tcg-ldst.c.inc"
483
#include <asm/hwcap.h>
484
485
/* used for function call generation */
486
@@ -XXX,XX +XXX,XX @@ static void tcg_out_tb_start(TCGContext *s)
487
/* nothing to do */
488
}
489
490
+static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
491
+{
492
+ for (int i = 0; i < count; ++i) {
493
+ /* Canonical nop is andi r0,r0,0 */
494
+ p[i] = OPC_ANDI;
495
+ }
496
+}
497
+
498
static void tcg_target_init(TCGContext *s)
499
{
500
unsigned long hwcap = qemu_getauxval(AT_HWCAP);
501
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
502
index XXXXXXX..XXXXXXX 100644
503
--- a/tcg/mips/tcg-target.c.inc
504
+++ b/tcg/mips/tcg-target.c.inc
505
@@ -XXX,XX +XXX,XX @@
506
* THE SOFTWARE.
507
*/
508
509
-#include "../tcg-ldst.c.inc"
510
-#include "../tcg-pool.c.inc"
511
-
512
/* used for function call generation */
513
#define TCG_TARGET_STACK_ALIGN 16
514
#if _MIPS_SIM == _ABIO32
515
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
516
index XXXXXXX..XXXXXXX 100644
517
--- a/tcg/ppc/tcg-target.c.inc
518
+++ b/tcg/ppc/tcg-target.c.inc
519
@@ -XXX,XX +XXX,XX @@
520
*/
521
522
#include "elf.h"
523
-#include "../tcg-pool.c.inc"
524
-#include "../tcg-ldst.c.inc"
29
525
30
/*
526
/*
527
* Standardize on the _CALL_FOO symbols used by GCC:
528
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
529
index XXXXXXX..XXXXXXX 100644
530
--- a/tcg/riscv/tcg-target.c.inc
531
+++ b/tcg/riscv/tcg-target.c.inc
532
@@ -XXX,XX +XXX,XX @@
533
* THE SOFTWARE.
534
*/
535
536
-#include "../tcg-ldst.c.inc"
537
-#include "../tcg-pool.c.inc"
538
-
539
/* Used for function call generation. */
540
#define TCG_REG_CALL_STACK TCG_REG_SP
541
#define TCG_TARGET_STACK_ALIGN 16
542
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
543
index XXXXXXX..XXXXXXX 100644
544
--- a/tcg/s390x/tcg-target.c.inc
545
+++ b/tcg/s390x/tcg-target.c.inc
546
@@ -XXX,XX +XXX,XX @@
547
* THE SOFTWARE.
548
*/
549
550
-#include "../tcg-ldst.c.inc"
551
-#include "../tcg-pool.c.inc"
552
#include "elf.h"
553
554
/* Used for function call generation. */
555
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
556
index XXXXXXX..XXXXXXX 100644
557
--- a/tcg/sparc64/tcg-target.c.inc
558
+++ b/tcg/sparc64/tcg-target.c.inc
559
@@ -XXX,XX +XXX,XX @@
560
#error "unsupported code generation mode"
561
#endif
562
563
-#include "../tcg-ldst.c.inc"
564
-#include "../tcg-pool.c.inc"
565
-
566
/* Used for function call generation. */
567
#define TCG_REG_CALL_STACK TCG_REG_O6
568
#define TCG_TARGET_STACK_BIAS 2047
569
diff --git a/tcg/tcg-ldst.c.inc b/tcg/tcg-ldst.c.inc
570
deleted file mode 100644
571
index XXXXXXX..XXXXXXX
572
--- a/tcg/tcg-ldst.c.inc
573
+++ /dev/null
574
@@ -XXX,XX +XXX,XX @@
575
-/*
576
- * TCG Backend Data: load-store optimization only.
577
- *
578
- * Permission is hereby granted, free of charge, to any person obtaining a copy
579
- * of this software and associated documentation files (the "Software"), to deal
580
- * in the Software without restriction, including without limitation the rights
581
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
582
- * copies of the Software, and to permit persons to whom the Software is
583
- * furnished to do so, subject to the following conditions:
584
- *
585
- * The above copyright notice and this permission notice shall be included in
586
- * all copies or substantial portions of the Software.
587
- *
588
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
589
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
590
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
591
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
592
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
593
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
594
- * THE SOFTWARE.
595
- */
596
-
597
-/*
598
- * Generate TB finalization at the end of block
599
- */
600
-
601
-static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
602
-static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
603
-
604
-static int tcg_out_ldst_finalize(TCGContext *s)
605
-{
606
- TCGLabelQemuLdst *lb;
607
-
608
- /* qemu_ld/st slow paths */
609
- QSIMPLEQ_FOREACH(lb, &s->ldst_labels, next) {
610
- if (lb->is_ld
611
- ? !tcg_out_qemu_ld_slow_path(s, lb)
612
- : !tcg_out_qemu_st_slow_path(s, lb)) {
613
- return -2;
614
- }
615
-
616
- /* Test for (pending) buffer overflow. The assumption is that any
617
- one operation beginning below the high water mark cannot overrun
618
- the buffer completely. Thus we can test for overflow after
619
- generating code without having to check during generation. */
620
- if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
621
- return -1;
622
- }
623
- }
624
- return 0;
625
-}
626
-
627
-/*
628
- * Allocate a new TCGLabelQemuLdst entry.
629
- */
630
-
631
-static inline TCGLabelQemuLdst *new_ldst_label(TCGContext *s)
632
-{
633
- TCGLabelQemuLdst *l = tcg_malloc(sizeof(*l));
634
-
635
- memset(l, 0, sizeof(*l));
636
- QSIMPLEQ_INSERT_TAIL(&s->ldst_labels, l, next);
637
-
638
- return l;
639
-}
640
diff --git a/tcg/tcg-pool.c.inc b/tcg/tcg-pool.c.inc
641
deleted file mode 100644
642
index XXXXXXX..XXXXXXX
643
--- a/tcg/tcg-pool.c.inc
644
+++ /dev/null
645
@@ -XXX,XX +XXX,XX @@
646
-/*
647
- * TCG Backend Data: constant pool.
648
- *
649
- * Permission is hereby granted, free of charge, to any person obtaining a copy
650
- * of this software and associated documentation files (the "Software"), to deal
651
- * in the Software without restriction, including without limitation the rights
652
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
653
- * copies of the Software, and to permit persons to whom the Software is
654
- * furnished to do so, subject to the following conditions:
655
- *
656
- * The above copyright notice and this permission notice shall be included in
657
- * all copies or substantial portions of the Software.
658
- *
659
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
660
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
661
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
662
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
663
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
664
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
665
- * THE SOFTWARE.
666
- */
667
-
668
-typedef struct TCGLabelPoolData {
669
- struct TCGLabelPoolData *next;
670
- tcg_insn_unit *label;
671
- intptr_t addend;
672
- int rtype;
673
- unsigned nlong;
674
- tcg_target_ulong data[];
675
-} TCGLabelPoolData;
676
-
677
-
678
-static TCGLabelPoolData *new_pool_alloc(TCGContext *s, int nlong, int rtype,
679
- tcg_insn_unit *label, intptr_t addend)
680
-{
681
- TCGLabelPoolData *n = tcg_malloc(sizeof(TCGLabelPoolData)
682
- + sizeof(tcg_target_ulong) * nlong);
683
-
684
- n->label = label;
685
- n->addend = addend;
686
- n->rtype = rtype;
687
- n->nlong = nlong;
688
- return n;
689
-}
690
-
691
-static void new_pool_insert(TCGContext *s, TCGLabelPoolData *n)
692
-{
693
- TCGLabelPoolData *i, **pp;
694
- int nlong = n->nlong;
695
-
696
- /* Insertion sort on the pool. */
697
- for (pp = &s->pool_labels; (i = *pp) != NULL; pp = &i->next) {
698
- if (nlong > i->nlong) {
699
- break;
700
- }
701
- if (nlong < i->nlong) {
702
- continue;
703
- }
704
- if (memcmp(n->data, i->data, sizeof(tcg_target_ulong) * nlong) >= 0) {
705
- break;
706
- }
707
- }
708
- n->next = *pp;
709
- *pp = n;
710
-}
711
-
712
-/* The "usual" for generic integer code. */
713
-static inline void new_pool_label(TCGContext *s, tcg_target_ulong d, int rtype,
714
- tcg_insn_unit *label, intptr_t addend)
715
-{
716
- TCGLabelPoolData *n = new_pool_alloc(s, 1, rtype, label, addend);
717
- n->data[0] = d;
718
- new_pool_insert(s, n);
719
-}
720
-
721
-/* For v64 or v128, depending on the host. */
722
-static inline void new_pool_l2(TCGContext *s, int rtype, tcg_insn_unit *label,
723
- intptr_t addend, tcg_target_ulong d0,
724
- tcg_target_ulong d1)
725
-{
726
- TCGLabelPoolData *n = new_pool_alloc(s, 2, rtype, label, addend);
727
- n->data[0] = d0;
728
- n->data[1] = d1;
729
- new_pool_insert(s, n);
730
-}
731
-
732
-/* For v128 or v256, depending on the host. */
733
-static inline void new_pool_l4(TCGContext *s, int rtype, tcg_insn_unit *label,
734
- intptr_t addend, tcg_target_ulong d0,
735
- tcg_target_ulong d1, tcg_target_ulong d2,
736
- tcg_target_ulong d3)
737
-{
738
- TCGLabelPoolData *n = new_pool_alloc(s, 4, rtype, label, addend);
739
- n->data[0] = d0;
740
- n->data[1] = d1;
741
- n->data[2] = d2;
742
- n->data[3] = d3;
743
- new_pool_insert(s, n);
744
-}
745
-
746
-/* For v256, for 32-bit host. */
747
-static inline void new_pool_l8(TCGContext *s, int rtype, tcg_insn_unit *label,
748
- intptr_t addend, tcg_target_ulong d0,
749
- tcg_target_ulong d1, tcg_target_ulong d2,
750
- tcg_target_ulong d3, tcg_target_ulong d4,
751
- tcg_target_ulong d5, tcg_target_ulong d6,
752
- tcg_target_ulong d7)
753
-{
754
- TCGLabelPoolData *n = new_pool_alloc(s, 8, rtype, label, addend);
755
- n->data[0] = d0;
756
- n->data[1] = d1;
757
- n->data[2] = d2;
758
- n->data[3] = d3;
759
- n->data[4] = d4;
760
- n->data[5] = d5;
761
- n->data[6] = d6;
762
- n->data[7] = d7;
763
- new_pool_insert(s, n);
764
-}
765
-
766
-/* To be provided by cpu/tcg-target.c.inc. */
767
-static void tcg_out_nop_fill(tcg_insn_unit *p, int count);
768
-
769
-static int tcg_out_pool_finalize(TCGContext *s)
770
-{
771
- TCGLabelPoolData *p = s->pool_labels;
772
- TCGLabelPoolData *l = NULL;
773
- void *a;
774
-
775
- if (p == NULL) {
776
- return 0;
777
- }
778
-
779
- /* ??? Round up to qemu_icache_linesize, but then do not round
780
- again when allocating the next TranslationBlock structure. */
781
- a = (void *)ROUND_UP((uintptr_t)s->code_ptr,
782
- sizeof(tcg_target_ulong) * p->nlong);
783
- tcg_out_nop_fill(s->code_ptr, (tcg_insn_unit *)a - s->code_ptr);
784
- s->data_gen_ptr = a;
785
-
786
- for (; p != NULL; p = p->next) {
787
- size_t size = sizeof(tcg_target_ulong) * p->nlong;
788
- uintptr_t value;
789
-
790
- if (!l || l->nlong != p->nlong || memcmp(l->data, p->data, size)) {
791
- if (unlikely(a > s->code_gen_highwater)) {
792
- return -1;
793
- }
794
- memcpy(a, p->data, size);
795
- a += size;
796
- l = p;
797
- }
798
-
799
- value = (uintptr_t)tcg_splitwx_to_rx(a) - size;
800
- if (!patch_reloc(p->label, p->rtype, value, p->addend)) {
801
- return -2;
802
- }
803
- }
804
-
805
- s->code_ptr = a;
806
- return 0;
807
-}
808
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
809
index XXXXXXX..XXXXXXX 100644
810
--- a/tcg/tci/tcg-target.c.inc
811
+++ b/tcg/tci/tcg-target.c.inc
812
@@ -XXX,XX +XXX,XX @@
813
* THE SOFTWARE.
814
*/
815
816
-#include "../tcg-pool.c.inc"
817
-
818
/* Used for function call generation. */
819
#define TCG_TARGET_CALL_STACK_OFFSET 0
820
#define TCG_TARGET_STACK_ALIGN 8
821
@@ -XXX,XX +XXX,XX @@ bool tcg_target_has_memory_bswap(MemOp memop)
822
{
823
return true;
824
}
825
+
826
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
827
+{
828
+ g_assert_not_reached();
829
+}
830
+
831
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
832
+{
833
+ g_assert_not_reached();
834
+}
31
--
835
--
32
2.34.1
836
2.43.0
33
837
34
838
diff view generated by jsdifflib
New patch
1
In addition, add empty files for mips, sparc64 and tci.
2
Make the include unconditional within tcg-opc.h.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/tcg/tcg-opc.h | 4 +---
8
tcg/aarch64/{tcg-target.opc.h => tcg-target-opc.h.inc} | 0
9
tcg/arm/{tcg-target.opc.h => tcg-target-opc.h.inc} | 0
10
tcg/i386/{tcg-target.opc.h => tcg-target-opc.h.inc} | 0
11
tcg/loongarch64/{tcg-target.opc.h => tcg-target-opc.h.inc} | 0
12
tcg/mips/tcg-target-opc.h.inc | 1 +
13
tcg/ppc/{tcg-target.opc.h => tcg-target-opc.h.inc} | 0
14
tcg/riscv/{tcg-target.opc.h => tcg-target-opc.h.inc} | 0
15
tcg/s390x/{tcg-target.opc.h => tcg-target-opc.h.inc} | 0
16
tcg/sparc64/tcg-target-opc.h.inc | 1 +
17
tcg/tci/tcg-target-opc.h.inc | 1 +
18
11 files changed, 4 insertions(+), 3 deletions(-)
19
rename tcg/aarch64/{tcg-target.opc.h => tcg-target-opc.h.inc} (100%)
20
rename tcg/arm/{tcg-target.opc.h => tcg-target-opc.h.inc} (100%)
21
rename tcg/i386/{tcg-target.opc.h => tcg-target-opc.h.inc} (100%)
22
rename tcg/loongarch64/{tcg-target.opc.h => tcg-target-opc.h.inc} (100%)
23
create mode 100644 tcg/mips/tcg-target-opc.h.inc
24
rename tcg/ppc/{tcg-target.opc.h => tcg-target-opc.h.inc} (100%)
25
rename tcg/riscv/{tcg-target.opc.h => tcg-target-opc.h.inc} (100%)
26
rename tcg/s390x/{tcg-target.opc.h => tcg-target-opc.h.inc} (100%)
27
create mode 100644 tcg/sparc64/tcg-target-opc.h.inc
28
create mode 100644 tcg/tci/tcg-target-opc.h.inc
29
30
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
31
index XXXXXXX..XXXXXXX 100644
32
--- a/include/tcg/tcg-opc.h
33
+++ b/include/tcg/tcg-opc.h
34
@@ -XXX,XX +XXX,XX @@ DEF(cmpsel_vec, 1, 4, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_cmpsel_vec))
35
36
DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT)
37
38
-#if TCG_TARGET_MAYBE_vec
39
-#include "tcg-target.opc.h"
40
-#endif
41
+#include "tcg-target-opc.h.inc"
42
43
#ifdef TCG_TARGET_INTERPRETER
44
/* These opcodes are only for use between the tci generator and interpreter. */
45
diff --git a/tcg/aarch64/tcg-target.opc.h b/tcg/aarch64/tcg-target-opc.h.inc
46
similarity index 100%
47
rename from tcg/aarch64/tcg-target.opc.h
48
rename to tcg/aarch64/tcg-target-opc.h.inc
49
diff --git a/tcg/arm/tcg-target.opc.h b/tcg/arm/tcg-target-opc.h.inc
50
similarity index 100%
51
rename from tcg/arm/tcg-target.opc.h
52
rename to tcg/arm/tcg-target-opc.h.inc
53
diff --git a/tcg/i386/tcg-target.opc.h b/tcg/i386/tcg-target-opc.h.inc
54
similarity index 100%
55
rename from tcg/i386/tcg-target.opc.h
56
rename to tcg/i386/tcg-target-opc.h.inc
57
diff --git a/tcg/loongarch64/tcg-target.opc.h b/tcg/loongarch64/tcg-target-opc.h.inc
58
similarity index 100%
59
rename from tcg/loongarch64/tcg-target.opc.h
60
rename to tcg/loongarch64/tcg-target-opc.h.inc
61
diff --git a/tcg/mips/tcg-target-opc.h.inc b/tcg/mips/tcg-target-opc.h.inc
62
new file mode 100644
63
index XXXXXXX..XXXXXXX
64
--- /dev/null
65
+++ b/tcg/mips/tcg-target-opc.h.inc
66
@@ -0,0 +1 @@
67
+/* No target specific opcodes. */
68
diff --git a/tcg/ppc/tcg-target.opc.h b/tcg/ppc/tcg-target-opc.h.inc
69
similarity index 100%
70
rename from tcg/ppc/tcg-target.opc.h
71
rename to tcg/ppc/tcg-target-opc.h.inc
72
diff --git a/tcg/riscv/tcg-target.opc.h b/tcg/riscv/tcg-target-opc.h.inc
73
similarity index 100%
74
rename from tcg/riscv/tcg-target.opc.h
75
rename to tcg/riscv/tcg-target-opc.h.inc
76
diff --git a/tcg/s390x/tcg-target.opc.h b/tcg/s390x/tcg-target-opc.h.inc
77
similarity index 100%
78
rename from tcg/s390x/tcg-target.opc.h
79
rename to tcg/s390x/tcg-target-opc.h.inc
80
diff --git a/tcg/sparc64/tcg-target-opc.h.inc b/tcg/sparc64/tcg-target-opc.h.inc
81
new file mode 100644
82
index XXXXXXX..XXXXXXX
83
--- /dev/null
84
+++ b/tcg/sparc64/tcg-target-opc.h.inc
85
@@ -0,0 +1 @@
86
+/* No target specific opcodes. */
87
diff --git a/tcg/tci/tcg-target-opc.h.inc b/tcg/tci/tcg-target-opc.h.inc
88
new file mode 100644
89
index XXXXXXX..XXXXXXX
90
--- /dev/null
91
+++ b/tcg/tci/tcg-target-opc.h.inc
92
@@ -0,0 +1 @@
93
+/* No target specific opcodes. */
94
--
95
2.43.0
96
97
diff view generated by jsdifflib
New patch
1
Now that tcg-target-opc.h.inc is unconditional,
2
we can move these out of the generic header.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/tcg/tcg-opc.h | 6 ------
8
tcg/tci/tcg-target-opc.h.inc | 5 ++++-
9
2 files changed, 4 insertions(+), 7 deletions(-)
10
11
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/include/tcg/tcg-opc.h
14
+++ b/include/tcg/tcg-opc.h
15
@@ -XXX,XX +XXX,XX @@ DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT)
16
17
#include "tcg-target-opc.h.inc"
18
19
-#ifdef TCG_TARGET_INTERPRETER
20
-/* These opcodes are only for use between the tci generator and interpreter. */
21
-DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT)
22
-DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT)
23
-#endif
24
-
25
#undef DATA64_ARGS
26
#undef IMPL
27
#undef IMPL64
28
diff --git a/tcg/tci/tcg-target-opc.h.inc b/tcg/tci/tcg-target-opc.h.inc
29
index XXXXXXX..XXXXXXX 100644
30
--- a/tcg/tci/tcg-target-opc.h.inc
31
+++ b/tcg/tci/tcg-target-opc.h.inc
32
@@ -1 +1,4 @@
33
-/* No target specific opcodes. */
34
+/* SPDX-License-Identifier: MIT */
35
+/* These opcodes for use between the tci generator and interpreter. */
36
+DEF(tci_movi, 1, 0, 1, TCG_OPF_NOT_PRESENT)
37
+DEF(tci_movl, 1, 0, 1, TCG_OPF_NOT_PRESENT)
38
--
39
2.43.0
40
41
diff view generated by jsdifflib
New patch
1
Don't reference TCG_TARGET_MAYBE_vec in a public header.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/tcg/tcg.h | 7 -------
7
tcg/tcg.c | 4 ++++
8
2 files changed, 4 insertions(+), 7 deletions(-)
9
10
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/include/tcg/tcg.h
13
+++ b/include/tcg/tcg.h
14
@@ -XXX,XX +XXX,XX @@ extern tcg_prologue_fn *tcg_qemu_tb_exec;
15
16
void tcg_register_jit(const void *buf, size_t buf_size);
17
18
-#if TCG_TARGET_MAYBE_vec
19
/* Return zero if the tuple (opc, type, vece) is unsupportable;
20
return > 0 if it is directly supportable;
21
return < 0 if we must call tcg_expand_vec_op. */
22
int tcg_can_emit_vec_op(TCGOpcode, TCGType, unsigned);
23
-#else
24
-static inline int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
25
-{
26
- return 0;
27
-}
28
-#endif
29
30
/* Expand the tuple (opc, type, vece) on the given arguments. */
31
void tcg_expand_vec_op(TCGOpcode, TCGType, unsigned, TCGArg, ...);
32
diff --git a/tcg/tcg.c b/tcg/tcg.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/tcg/tcg.c
35
+++ b/tcg/tcg.c
36
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
37
{
38
g_assert_not_reached();
39
}
40
+int tcg_can_emit_vec_op(TCGOpcode o, TCGType t, unsigned ve)
41
+{
42
+ return 0;
43
+}
44
#endif
45
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
46
intptr_t arg2);
47
--
48
2.43.0
49
50
diff view generated by jsdifflib
New patch
1
Left-over from commit 623d7e3551a ("util: Add cpuinfo-ppc.c").
1
2
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Message-ID: <20250108215156.8731-2-philmd@linaro.org>
6
---
7
tcg/ppc/tcg-target.h | 8 --------
8
1 file changed, 8 deletions(-)
9
10
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/ppc/tcg-target.h
13
+++ b/tcg/ppc/tcg-target.h
14
@@ -XXX,XX +XXX,XX @@ typedef enum {
15
TCG_AREG0 = TCG_REG_R27
16
} TCGReg;
17
18
-typedef enum {
19
- tcg_isa_base,
20
- tcg_isa_2_06,
21
- tcg_isa_2_07,
22
- tcg_isa_3_00,
23
- tcg_isa_3_10,
24
-} TCGPowerISA;
25
-
26
#define have_isa_2_06 (cpuinfo & CPUINFO_V2_06)
27
#define have_isa_2_07 (cpuinfo & CPUINFO_V2_07)
28
#define have_isa_3_00 (cpuinfo & CPUINFO_V3_0)
29
--
30
2.43.0
31
32
diff view generated by jsdifflib
New patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Message-ID: <20250108215156.8731-3-philmd@linaro.org>
4
---
5
include/tcg/tcg.h | 105 +-----------------------------------------
6
tcg/tcg-has.h | 115 ++++++++++++++++++++++++++++++++++++++++++++++
7
2 files changed, 116 insertions(+), 104 deletions(-)
8
create mode 100644 tcg/tcg-has.h
1
9
10
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/include/tcg/tcg.h
13
+++ b/include/tcg/tcg.h
14
@@ -XXX,XX +XXX,XX @@ typedef uint64_t TCGRegSet;
15
#error unsupported
16
#endif
17
18
-#if TCG_TARGET_REG_BITS == 32
19
-/* Turn some undef macros into false macros. */
20
-#define TCG_TARGET_HAS_extr_i64_i32 0
21
-#define TCG_TARGET_HAS_div_i64 0
22
-#define TCG_TARGET_HAS_rem_i64 0
23
-#define TCG_TARGET_HAS_div2_i64 0
24
-#define TCG_TARGET_HAS_rot_i64 0
25
-#define TCG_TARGET_HAS_ext8s_i64 0
26
-#define TCG_TARGET_HAS_ext16s_i64 0
27
-#define TCG_TARGET_HAS_ext32s_i64 0
28
-#define TCG_TARGET_HAS_ext8u_i64 0
29
-#define TCG_TARGET_HAS_ext16u_i64 0
30
-#define TCG_TARGET_HAS_ext32u_i64 0
31
-#define TCG_TARGET_HAS_bswap16_i64 0
32
-#define TCG_TARGET_HAS_bswap32_i64 0
33
-#define TCG_TARGET_HAS_bswap64_i64 0
34
-#define TCG_TARGET_HAS_not_i64 0
35
-#define TCG_TARGET_HAS_andc_i64 0
36
-#define TCG_TARGET_HAS_orc_i64 0
37
-#define TCG_TARGET_HAS_eqv_i64 0
38
-#define TCG_TARGET_HAS_nand_i64 0
39
-#define TCG_TARGET_HAS_nor_i64 0
40
-#define TCG_TARGET_HAS_clz_i64 0
41
-#define TCG_TARGET_HAS_ctz_i64 0
42
-#define TCG_TARGET_HAS_ctpop_i64 0
43
-#define TCG_TARGET_HAS_deposit_i64 0
44
-#define TCG_TARGET_HAS_extract_i64 0
45
-#define TCG_TARGET_HAS_sextract_i64 0
46
-#define TCG_TARGET_HAS_extract2_i64 0
47
-#define TCG_TARGET_HAS_negsetcond_i64 0
48
-#define TCG_TARGET_HAS_add2_i64 0
49
-#define TCG_TARGET_HAS_sub2_i64 0
50
-#define TCG_TARGET_HAS_mulu2_i64 0
51
-#define TCG_TARGET_HAS_muls2_i64 0
52
-#define TCG_TARGET_HAS_muluh_i64 0
53
-#define TCG_TARGET_HAS_mulsh_i64 0
54
-/* Turn some undef macros into true macros. */
55
-#define TCG_TARGET_HAS_add2_i32 1
56
-#define TCG_TARGET_HAS_sub2_i32 1
57
-#endif
58
-
59
-#ifndef TCG_TARGET_deposit_i32_valid
60
-#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
61
-#endif
62
-#ifndef TCG_TARGET_deposit_i64_valid
63
-#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
64
-#endif
65
-#ifndef TCG_TARGET_extract_i32_valid
66
-#define TCG_TARGET_extract_i32_valid(ofs, len) 1
67
-#endif
68
-#ifndef TCG_TARGET_extract_i64_valid
69
-#define TCG_TARGET_extract_i64_valid(ofs, len) 1
70
-#endif
71
-
72
-/* Only one of DIV or DIV2 should be defined. */
73
-#if defined(TCG_TARGET_HAS_div_i32)
74
-#define TCG_TARGET_HAS_div2_i32 0
75
-#elif defined(TCG_TARGET_HAS_div2_i32)
76
-#define TCG_TARGET_HAS_div_i32 0
77
-#define TCG_TARGET_HAS_rem_i32 0
78
-#endif
79
-#if defined(TCG_TARGET_HAS_div_i64)
80
-#define TCG_TARGET_HAS_div2_i64 0
81
-#elif defined(TCG_TARGET_HAS_div2_i64)
82
-#define TCG_TARGET_HAS_div_i64 0
83
-#define TCG_TARGET_HAS_rem_i64 0
84
-#endif
85
-
86
-#if !defined(TCG_TARGET_HAS_v64) \
87
- && !defined(TCG_TARGET_HAS_v128) \
88
- && !defined(TCG_TARGET_HAS_v256)
89
-#define TCG_TARGET_MAYBE_vec 0
90
-#define TCG_TARGET_HAS_abs_vec 0
91
-#define TCG_TARGET_HAS_neg_vec 0
92
-#define TCG_TARGET_HAS_not_vec 0
93
-#define TCG_TARGET_HAS_andc_vec 0
94
-#define TCG_TARGET_HAS_orc_vec 0
95
-#define TCG_TARGET_HAS_nand_vec 0
96
-#define TCG_TARGET_HAS_nor_vec 0
97
-#define TCG_TARGET_HAS_eqv_vec 0
98
-#define TCG_TARGET_HAS_roti_vec 0
99
-#define TCG_TARGET_HAS_rots_vec 0
100
-#define TCG_TARGET_HAS_rotv_vec 0
101
-#define TCG_TARGET_HAS_shi_vec 0
102
-#define TCG_TARGET_HAS_shs_vec 0
103
-#define TCG_TARGET_HAS_shv_vec 0
104
-#define TCG_TARGET_HAS_mul_vec 0
105
-#define TCG_TARGET_HAS_sat_vec 0
106
-#define TCG_TARGET_HAS_minmax_vec 0
107
-#define TCG_TARGET_HAS_bitsel_vec 0
108
-#define TCG_TARGET_HAS_cmpsel_vec 0
109
-#define TCG_TARGET_HAS_tst_vec 0
110
-#else
111
-#define TCG_TARGET_MAYBE_vec 1
112
-#endif
113
-#ifndef TCG_TARGET_HAS_v64
114
-#define TCG_TARGET_HAS_v64 0
115
-#endif
116
-#ifndef TCG_TARGET_HAS_v128
117
-#define TCG_TARGET_HAS_v128 0
118
-#endif
119
-#ifndef TCG_TARGET_HAS_v256
120
-#define TCG_TARGET_HAS_v256 0
121
-#endif
122
+#include "tcg/tcg-has.h"
123
124
typedef enum TCGOpcode {
125
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
126
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
127
new file mode 100644
128
index XXXXXXX..XXXXXXX
129
--- /dev/null
130
+++ b/tcg/tcg-has.h
131
@@ -XXX,XX +XXX,XX @@
132
+/* SPDX-License-Identifier: MIT */
133
+/*
134
+ * Define target-specific opcode support
135
+ * Copyright (c) 2024 Linaro, Ltd.
136
+ */
137
+
138
+#ifndef TCG_HAS_H
139
+#define TCG_HAS_H
140
+
141
+#if TCG_TARGET_REG_BITS == 32
142
+/* Turn some undef macros into false macros. */
143
+#define TCG_TARGET_HAS_extr_i64_i32 0
144
+#define TCG_TARGET_HAS_div_i64 0
145
+#define TCG_TARGET_HAS_rem_i64 0
146
+#define TCG_TARGET_HAS_div2_i64 0
147
+#define TCG_TARGET_HAS_rot_i64 0
148
+#define TCG_TARGET_HAS_ext8s_i64 0
149
+#define TCG_TARGET_HAS_ext16s_i64 0
150
+#define TCG_TARGET_HAS_ext32s_i64 0
151
+#define TCG_TARGET_HAS_ext8u_i64 0
152
+#define TCG_TARGET_HAS_ext16u_i64 0
153
+#define TCG_TARGET_HAS_ext32u_i64 0
154
+#define TCG_TARGET_HAS_bswap16_i64 0
155
+#define TCG_TARGET_HAS_bswap32_i64 0
156
+#define TCG_TARGET_HAS_bswap64_i64 0
157
+#define TCG_TARGET_HAS_not_i64 0
158
+#define TCG_TARGET_HAS_andc_i64 0
159
+#define TCG_TARGET_HAS_orc_i64 0
160
+#define TCG_TARGET_HAS_eqv_i64 0
161
+#define TCG_TARGET_HAS_nand_i64 0
162
+#define TCG_TARGET_HAS_nor_i64 0
163
+#define TCG_TARGET_HAS_clz_i64 0
164
+#define TCG_TARGET_HAS_ctz_i64 0
165
+#define TCG_TARGET_HAS_ctpop_i64 0
166
+#define TCG_TARGET_HAS_deposit_i64 0
167
+#define TCG_TARGET_HAS_extract_i64 0
168
+#define TCG_TARGET_HAS_sextract_i64 0
169
+#define TCG_TARGET_HAS_extract2_i64 0
170
+#define TCG_TARGET_HAS_negsetcond_i64 0
171
+#define TCG_TARGET_HAS_add2_i64 0
172
+#define TCG_TARGET_HAS_sub2_i64 0
173
+#define TCG_TARGET_HAS_mulu2_i64 0
174
+#define TCG_TARGET_HAS_muls2_i64 0
175
+#define TCG_TARGET_HAS_muluh_i64 0
176
+#define TCG_TARGET_HAS_mulsh_i64 0
177
+/* Turn some undef macros into true macros. */
178
+#define TCG_TARGET_HAS_add2_i32 1
179
+#define TCG_TARGET_HAS_sub2_i32 1
180
+#endif
181
+
182
+#ifndef TCG_TARGET_deposit_i32_valid
183
+#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
184
+#endif
185
+#ifndef TCG_TARGET_deposit_i64_valid
186
+#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
187
+#endif
188
+#ifndef TCG_TARGET_extract_i32_valid
189
+#define TCG_TARGET_extract_i32_valid(ofs, len) 1
190
+#endif
191
+#ifndef TCG_TARGET_extract_i64_valid
192
+#define TCG_TARGET_extract_i64_valid(ofs, len) 1
193
+#endif
194
+
195
+/* Only one of DIV or DIV2 should be defined. */
196
+#if defined(TCG_TARGET_HAS_div_i32)
197
+#define TCG_TARGET_HAS_div2_i32 0
198
+#elif defined(TCG_TARGET_HAS_div2_i32)
199
+#define TCG_TARGET_HAS_div_i32 0
200
+#define TCG_TARGET_HAS_rem_i32 0
201
+#endif
202
+#if defined(TCG_TARGET_HAS_div_i64)
203
+#define TCG_TARGET_HAS_div2_i64 0
204
+#elif defined(TCG_TARGET_HAS_div2_i64)
205
+#define TCG_TARGET_HAS_div_i64 0
206
+#define TCG_TARGET_HAS_rem_i64 0
207
+#endif
208
+
209
+#if !defined(TCG_TARGET_HAS_v64) \
210
+ && !defined(TCG_TARGET_HAS_v128) \
211
+ && !defined(TCG_TARGET_HAS_v256)
212
+#define TCG_TARGET_MAYBE_vec 0
213
+#define TCG_TARGET_HAS_abs_vec 0
214
+#define TCG_TARGET_HAS_neg_vec 0
215
+#define TCG_TARGET_HAS_not_vec 0
216
+#define TCG_TARGET_HAS_andc_vec 0
217
+#define TCG_TARGET_HAS_orc_vec 0
218
+#define TCG_TARGET_HAS_nand_vec 0
219
+#define TCG_TARGET_HAS_nor_vec 0
220
+#define TCG_TARGET_HAS_eqv_vec 0
221
+#define TCG_TARGET_HAS_roti_vec 0
222
+#define TCG_TARGET_HAS_rots_vec 0
223
+#define TCG_TARGET_HAS_rotv_vec 0
224
+#define TCG_TARGET_HAS_shi_vec 0
225
+#define TCG_TARGET_HAS_shs_vec 0
226
+#define TCG_TARGET_HAS_shv_vec 0
227
+#define TCG_TARGET_HAS_mul_vec 0
228
+#define TCG_TARGET_HAS_sat_vec 0
229
+#define TCG_TARGET_HAS_minmax_vec 0
230
+#define TCG_TARGET_HAS_bitsel_vec 0
231
+#define TCG_TARGET_HAS_cmpsel_vec 0
232
+#define TCG_TARGET_HAS_tst_vec 0
233
+#else
234
+#define TCG_TARGET_MAYBE_vec 1
235
+#endif
236
+#ifndef TCG_TARGET_HAS_v64
237
+#define TCG_TARGET_HAS_v64 0
238
+#endif
239
+#ifndef TCG_TARGET_HAS_v128
240
+#define TCG_TARGET_HAS_v128 0
241
+#endif
242
+#ifndef TCG_TARGET_HAS_v256
243
+#define TCG_TARGET_HAS_v256 0
244
+#endif
245
+
246
+#endif
247
--
248
2.43.0
249
250
diff view generated by jsdifflib
New patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Message-ID: <20250108215156.8731-4-philmd@linaro.org>
4
---
5
tcg/aarch64/tcg-target-has.h | 119 +++++++++++++++++++++++++++++++++++
6
tcg/aarch64/tcg-target.h | 109 +-------------------------------
7
2 files changed, 120 insertions(+), 108 deletions(-)
8
create mode 100644 tcg/aarch64/tcg-target-has.h
1
9
10
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
11
new file mode 100644
12
index XXXXXXX..XXXXXXX
13
--- /dev/null
14
+++ b/tcg/aarch64/tcg-target-has.h
15
@@ -XXX,XX +XXX,XX @@
16
+/* SPDX-License-Identifier: GPL-2.0-or-later */
17
+/*
18
+ * Define target-specific opcode support
19
+ * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH
20
+ */
21
+
22
+#ifndef TCG_TARGET_HAS_H
23
+#define TCG_TARGET_HAS_H
24
+
25
+#include "host/cpuinfo.h"
26
+
27
+#define have_lse (cpuinfo & CPUINFO_LSE)
28
+#define have_lse2 (cpuinfo & CPUINFO_LSE2)
29
+
30
+/* optional instructions */
31
+#define TCG_TARGET_HAS_div_i32 1
32
+#define TCG_TARGET_HAS_rem_i32 1
33
+#define TCG_TARGET_HAS_ext8s_i32 1
34
+#define TCG_TARGET_HAS_ext16s_i32 1
35
+#define TCG_TARGET_HAS_ext8u_i32 1
36
+#define TCG_TARGET_HAS_ext16u_i32 1
37
+#define TCG_TARGET_HAS_bswap16_i32 1
38
+#define TCG_TARGET_HAS_bswap32_i32 1
39
+#define TCG_TARGET_HAS_not_i32 1
40
+#define TCG_TARGET_HAS_rot_i32 1
41
+#define TCG_TARGET_HAS_andc_i32 1
42
+#define TCG_TARGET_HAS_orc_i32 1
43
+#define TCG_TARGET_HAS_eqv_i32 1
44
+#define TCG_TARGET_HAS_nand_i32 0
45
+#define TCG_TARGET_HAS_nor_i32 0
46
+#define TCG_TARGET_HAS_clz_i32 1
47
+#define TCG_TARGET_HAS_ctz_i32 1
48
+#define TCG_TARGET_HAS_ctpop_i32 0
49
+#define TCG_TARGET_HAS_deposit_i32 1
50
+#define TCG_TARGET_HAS_extract_i32 1
51
+#define TCG_TARGET_HAS_sextract_i32 1
52
+#define TCG_TARGET_HAS_extract2_i32 1
53
+#define TCG_TARGET_HAS_negsetcond_i32 1
54
+#define TCG_TARGET_HAS_add2_i32 1
55
+#define TCG_TARGET_HAS_sub2_i32 1
56
+#define TCG_TARGET_HAS_mulu2_i32 0
57
+#define TCG_TARGET_HAS_muls2_i32 0
58
+#define TCG_TARGET_HAS_muluh_i32 0
59
+#define TCG_TARGET_HAS_mulsh_i32 0
60
+#define TCG_TARGET_HAS_extr_i64_i32 0
61
+#define TCG_TARGET_HAS_qemu_st8_i32 0
62
+
63
+#define TCG_TARGET_HAS_div_i64 1
64
+#define TCG_TARGET_HAS_rem_i64 1
65
+#define TCG_TARGET_HAS_ext8s_i64 1
66
+#define TCG_TARGET_HAS_ext16s_i64 1
67
+#define TCG_TARGET_HAS_ext32s_i64 1
68
+#define TCG_TARGET_HAS_ext8u_i64 1
69
+#define TCG_TARGET_HAS_ext16u_i64 1
70
+#define TCG_TARGET_HAS_ext32u_i64 1
71
+#define TCG_TARGET_HAS_bswap16_i64 1
72
+#define TCG_TARGET_HAS_bswap32_i64 1
73
+#define TCG_TARGET_HAS_bswap64_i64 1
74
+#define TCG_TARGET_HAS_not_i64 1
75
+#define TCG_TARGET_HAS_rot_i64 1
76
+#define TCG_TARGET_HAS_andc_i64 1
77
+#define TCG_TARGET_HAS_orc_i64 1
78
+#define TCG_TARGET_HAS_eqv_i64 1
79
+#define TCG_TARGET_HAS_nand_i64 0
80
+#define TCG_TARGET_HAS_nor_i64 0
81
+#define TCG_TARGET_HAS_clz_i64 1
82
+#define TCG_TARGET_HAS_ctz_i64 1
83
+#define TCG_TARGET_HAS_ctpop_i64 0
84
+#define TCG_TARGET_HAS_deposit_i64 1
85
+#define TCG_TARGET_HAS_extract_i64 1
86
+#define TCG_TARGET_HAS_sextract_i64 1
87
+#define TCG_TARGET_HAS_extract2_i64 1
88
+#define TCG_TARGET_HAS_negsetcond_i64 1
89
+#define TCG_TARGET_HAS_add2_i64 1
90
+#define TCG_TARGET_HAS_sub2_i64 1
91
+#define TCG_TARGET_HAS_mulu2_i64 0
92
+#define TCG_TARGET_HAS_muls2_i64 0
93
+#define TCG_TARGET_HAS_muluh_i64 1
94
+#define TCG_TARGET_HAS_mulsh_i64 1
95
+
96
+/*
97
+ * Without FEAT_LSE2, we must use LDXP+STXP to implement atomic 128-bit load,
98
+ * which requires writable pages. We must defer to the helper for user-only,
99
+ * but in system mode all ram is writable for the host.
100
+ */
101
+#ifdef CONFIG_USER_ONLY
102
+#define TCG_TARGET_HAS_qemu_ldst_i128 have_lse2
103
+#else
104
+#define TCG_TARGET_HAS_qemu_ldst_i128 1
105
+#endif
106
+
107
+#define TCG_TARGET_HAS_tst 1
108
+
109
+#define TCG_TARGET_HAS_v64 1
110
+#define TCG_TARGET_HAS_v128 1
111
+#define TCG_TARGET_HAS_v256 0
112
+
113
+#define TCG_TARGET_HAS_andc_vec 1
114
+#define TCG_TARGET_HAS_orc_vec 1
115
+#define TCG_TARGET_HAS_nand_vec 0
116
+#define TCG_TARGET_HAS_nor_vec 0
117
+#define TCG_TARGET_HAS_eqv_vec 0
118
+#define TCG_TARGET_HAS_not_vec 1
119
+#define TCG_TARGET_HAS_neg_vec 1
120
+#define TCG_TARGET_HAS_abs_vec 1
121
+#define TCG_TARGET_HAS_roti_vec 0
122
+#define TCG_TARGET_HAS_rots_vec 0
123
+#define TCG_TARGET_HAS_rotv_vec 0
124
+#define TCG_TARGET_HAS_shi_vec 1
125
+#define TCG_TARGET_HAS_shs_vec 0
126
+#define TCG_TARGET_HAS_shv_vec 1
127
+#define TCG_TARGET_HAS_mul_vec 1
128
+#define TCG_TARGET_HAS_sat_vec 1
129
+#define TCG_TARGET_HAS_minmax_vec 1
130
+#define TCG_TARGET_HAS_bitsel_vec 1
131
+#define TCG_TARGET_HAS_cmpsel_vec 0
132
+#define TCG_TARGET_HAS_tst_vec 1
133
+
134
+#endif
135
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
136
index XXXXXXX..XXXXXXX 100644
137
--- a/tcg/aarch64/tcg-target.h
138
+++ b/tcg/aarch64/tcg-target.h
139
@@ -XXX,XX +XXX,XX @@
140
#ifndef AARCH64_TCG_TARGET_H
141
#define AARCH64_TCG_TARGET_H
142
143
-#include "host/cpuinfo.h"
144
-
145
#define TCG_TARGET_INSN_UNIT_SIZE 4
146
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
147
148
@@ -XXX,XX +XXX,XX @@ typedef enum {
149
150
#define TCG_TARGET_NB_REGS 64
151
152
-#define have_lse (cpuinfo & CPUINFO_LSE)
153
-#define have_lse2 (cpuinfo & CPUINFO_LSE2)
154
-
155
-/* optional instructions */
156
-#define TCG_TARGET_HAS_div_i32 1
157
-#define TCG_TARGET_HAS_rem_i32 1
158
-#define TCG_TARGET_HAS_ext8s_i32 1
159
-#define TCG_TARGET_HAS_ext16s_i32 1
160
-#define TCG_TARGET_HAS_ext8u_i32 1
161
-#define TCG_TARGET_HAS_ext16u_i32 1
162
-#define TCG_TARGET_HAS_bswap16_i32 1
163
-#define TCG_TARGET_HAS_bswap32_i32 1
164
-#define TCG_TARGET_HAS_not_i32 1
165
-#define TCG_TARGET_HAS_rot_i32 1
166
-#define TCG_TARGET_HAS_andc_i32 1
167
-#define TCG_TARGET_HAS_orc_i32 1
168
-#define TCG_TARGET_HAS_eqv_i32 1
169
-#define TCG_TARGET_HAS_nand_i32 0
170
-#define TCG_TARGET_HAS_nor_i32 0
171
-#define TCG_TARGET_HAS_clz_i32 1
172
-#define TCG_TARGET_HAS_ctz_i32 1
173
-#define TCG_TARGET_HAS_ctpop_i32 0
174
-#define TCG_TARGET_HAS_deposit_i32 1
175
-#define TCG_TARGET_HAS_extract_i32 1
176
-#define TCG_TARGET_HAS_sextract_i32 1
177
-#define TCG_TARGET_HAS_extract2_i32 1
178
-#define TCG_TARGET_HAS_negsetcond_i32 1
179
-#define TCG_TARGET_HAS_add2_i32 1
180
-#define TCG_TARGET_HAS_sub2_i32 1
181
-#define TCG_TARGET_HAS_mulu2_i32 0
182
-#define TCG_TARGET_HAS_muls2_i32 0
183
-#define TCG_TARGET_HAS_muluh_i32 0
184
-#define TCG_TARGET_HAS_mulsh_i32 0
185
-#define TCG_TARGET_HAS_extr_i64_i32 0
186
-#define TCG_TARGET_HAS_qemu_st8_i32 0
187
-
188
-#define TCG_TARGET_HAS_div_i64 1
189
-#define TCG_TARGET_HAS_rem_i64 1
190
-#define TCG_TARGET_HAS_ext8s_i64 1
191
-#define TCG_TARGET_HAS_ext16s_i64 1
192
-#define TCG_TARGET_HAS_ext32s_i64 1
193
-#define TCG_TARGET_HAS_ext8u_i64 1
194
-#define TCG_TARGET_HAS_ext16u_i64 1
195
-#define TCG_TARGET_HAS_ext32u_i64 1
196
-#define TCG_TARGET_HAS_bswap16_i64 1
197
-#define TCG_TARGET_HAS_bswap32_i64 1
198
-#define TCG_TARGET_HAS_bswap64_i64 1
199
-#define TCG_TARGET_HAS_not_i64 1
200
-#define TCG_TARGET_HAS_rot_i64 1
201
-#define TCG_TARGET_HAS_andc_i64 1
202
-#define TCG_TARGET_HAS_orc_i64 1
203
-#define TCG_TARGET_HAS_eqv_i64 1
204
-#define TCG_TARGET_HAS_nand_i64 0
205
-#define TCG_TARGET_HAS_nor_i64 0
206
-#define TCG_TARGET_HAS_clz_i64 1
207
-#define TCG_TARGET_HAS_ctz_i64 1
208
-#define TCG_TARGET_HAS_ctpop_i64 0
209
-#define TCG_TARGET_HAS_deposit_i64 1
210
-#define TCG_TARGET_HAS_extract_i64 1
211
-#define TCG_TARGET_HAS_sextract_i64 1
212
-#define TCG_TARGET_HAS_extract2_i64 1
213
-#define TCG_TARGET_HAS_negsetcond_i64 1
214
-#define TCG_TARGET_HAS_add2_i64 1
215
-#define TCG_TARGET_HAS_sub2_i64 1
216
-#define TCG_TARGET_HAS_mulu2_i64 0
217
-#define TCG_TARGET_HAS_muls2_i64 0
218
-#define TCG_TARGET_HAS_muluh_i64 1
219
-#define TCG_TARGET_HAS_mulsh_i64 1
220
-
221
-/*
222
- * Without FEAT_LSE2, we must use LDXP+STXP to implement atomic 128-bit load,
223
- * which requires writable pages. We must defer to the helper for user-only,
224
- * but in system mode all ram is writable for the host.
225
- */
226
-#ifdef CONFIG_USER_ONLY
227
-#define TCG_TARGET_HAS_qemu_ldst_i128 have_lse2
228
-#else
229
-#define TCG_TARGET_HAS_qemu_ldst_i128 1
230
-#endif
231
-
232
-#define TCG_TARGET_HAS_tst 1
233
-
234
-#define TCG_TARGET_HAS_v64 1
235
-#define TCG_TARGET_HAS_v128 1
236
-#define TCG_TARGET_HAS_v256 0
237
-
238
-#define TCG_TARGET_HAS_andc_vec 1
239
-#define TCG_TARGET_HAS_orc_vec 1
240
-#define TCG_TARGET_HAS_nand_vec 0
241
-#define TCG_TARGET_HAS_nor_vec 0
242
-#define TCG_TARGET_HAS_eqv_vec 0
243
-#define TCG_TARGET_HAS_not_vec 1
244
-#define TCG_TARGET_HAS_neg_vec 1
245
-#define TCG_TARGET_HAS_abs_vec 1
246
-#define TCG_TARGET_HAS_roti_vec 0
247
-#define TCG_TARGET_HAS_rots_vec 0
248
-#define TCG_TARGET_HAS_rotv_vec 0
249
-#define TCG_TARGET_HAS_shi_vec 1
250
-#define TCG_TARGET_HAS_shs_vec 0
251
-#define TCG_TARGET_HAS_shv_vec 1
252
-#define TCG_TARGET_HAS_mul_vec 1
253
-#define TCG_TARGET_HAS_sat_vec 1
254
-#define TCG_TARGET_HAS_minmax_vec 1
255
-#define TCG_TARGET_HAS_bitsel_vec 1
256
-#define TCG_TARGET_HAS_cmpsel_vec 0
257
-#define TCG_TARGET_HAS_tst_vec 1
258
+#include "tcg-target-has.h"
259
260
#define TCG_TARGET_DEFAULT_MO (0)
261
262
--
263
2.43.0
264
265
diff view generated by jsdifflib
New patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Message-ID: <20250108215156.8731-5-philmd@linaro.org>
4
---
5
tcg/arm/tcg-target-has.h | 85 ++++++++++++++++++++++++++++++++++++++++
6
tcg/arm/tcg-target.h | 74 +---------------------------------
7
2 files changed, 86 insertions(+), 73 deletions(-)
8
create mode 100644 tcg/arm/tcg-target-has.h
1
9
10
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
11
new file mode 100644
12
index XXXXXXX..XXXXXXX
13
--- /dev/null
14
+++ b/tcg/arm/tcg-target-has.h
15
@@ -XXX,XX +XXX,XX @@
16
+/* SPDX-License-Identifier: MIT */
17
+/*
18
+ * Define target-specific opcode support
19
+ * Copyright (c) 2008 Fabrice Bellard
20
+ * Copyright (c) 2008 Andrzej Zaborowski
21
+ */
22
+
23
+#ifndef TCG_TARGET_HAS_H
24
+#define TCG_TARGET_HAS_H
25
+
26
+extern int arm_arch;
27
+
28
+#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7)
29
+
30
+#ifdef __ARM_ARCH_EXT_IDIV__
31
+#define use_idiv_instructions 1
32
+#else
33
+extern bool use_idiv_instructions;
34
+#endif
35
+#ifdef __ARM_NEON__
36
+#define use_neon_instructions 1
37
+#else
38
+extern bool use_neon_instructions;
39
+#endif
40
+
41
+/* optional instructions */
42
+#define TCG_TARGET_HAS_ext8s_i32 1
43
+#define TCG_TARGET_HAS_ext16s_i32 1
44
+#define TCG_TARGET_HAS_ext8u_i32 0 /* and r0, r1, #0xff */
45
+#define TCG_TARGET_HAS_ext16u_i32 1
46
+#define TCG_TARGET_HAS_bswap16_i32 1
47
+#define TCG_TARGET_HAS_bswap32_i32 1
48
+#define TCG_TARGET_HAS_not_i32 1
49
+#define TCG_TARGET_HAS_rot_i32 1
50
+#define TCG_TARGET_HAS_andc_i32 1
51
+#define TCG_TARGET_HAS_orc_i32 0
52
+#define TCG_TARGET_HAS_eqv_i32 0
53
+#define TCG_TARGET_HAS_nand_i32 0
54
+#define TCG_TARGET_HAS_nor_i32 0
55
+#define TCG_TARGET_HAS_clz_i32 1
56
+#define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions
57
+#define TCG_TARGET_HAS_ctpop_i32 0
58
+#define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions
59
+#define TCG_TARGET_HAS_extract_i32 use_armv7_instructions
60
+#define TCG_TARGET_HAS_sextract_i32 use_armv7_instructions
61
+#define TCG_TARGET_HAS_extract2_i32 1
62
+#define TCG_TARGET_HAS_negsetcond_i32 1
63
+#define TCG_TARGET_HAS_mulu2_i32 1
64
+#define TCG_TARGET_HAS_muls2_i32 1
65
+#define TCG_TARGET_HAS_muluh_i32 0
66
+#define TCG_TARGET_HAS_mulsh_i32 0
67
+#define TCG_TARGET_HAS_div_i32 use_idiv_instructions
68
+#define TCG_TARGET_HAS_rem_i32 0
69
+#define TCG_TARGET_HAS_qemu_st8_i32 0
70
+
71
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
72
+
73
+#define TCG_TARGET_HAS_tst 1
74
+
75
+#define TCG_TARGET_HAS_v64 use_neon_instructions
76
+#define TCG_TARGET_HAS_v128 use_neon_instructions
77
+#define TCG_TARGET_HAS_v256 0
78
+
79
+#define TCG_TARGET_HAS_andc_vec 1
80
+#define TCG_TARGET_HAS_orc_vec 1
81
+#define TCG_TARGET_HAS_nand_vec 0
82
+#define TCG_TARGET_HAS_nor_vec 0
83
+#define TCG_TARGET_HAS_eqv_vec 0
84
+#define TCG_TARGET_HAS_not_vec 1
85
+#define TCG_TARGET_HAS_neg_vec 1
86
+#define TCG_TARGET_HAS_abs_vec 1
87
+#define TCG_TARGET_HAS_roti_vec 0
88
+#define TCG_TARGET_HAS_rots_vec 0
89
+#define TCG_TARGET_HAS_rotv_vec 0
90
+#define TCG_TARGET_HAS_shi_vec 1
91
+#define TCG_TARGET_HAS_shs_vec 0
92
+#define TCG_TARGET_HAS_shv_vec 0
93
+#define TCG_TARGET_HAS_mul_vec 1
94
+#define TCG_TARGET_HAS_sat_vec 1
95
+#define TCG_TARGET_HAS_minmax_vec 1
96
+#define TCG_TARGET_HAS_bitsel_vec 1
97
+#define TCG_TARGET_HAS_cmpsel_vec 0
98
+#define TCG_TARGET_HAS_tst_vec 1
99
+
100
+#endif
101
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
102
index XXXXXXX..XXXXXXX 100644
103
--- a/tcg/arm/tcg-target.h
104
+++ b/tcg/arm/tcg-target.h
105
@@ -XXX,XX +XXX,XX @@
106
#ifndef ARM_TCG_TARGET_H
107
#define ARM_TCG_TARGET_H
108
109
-extern int arm_arch;
110
-
111
-#define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7)
112
-
113
#define TCG_TARGET_INSN_UNIT_SIZE 4
114
#define MAX_CODE_GEN_BUFFER_SIZE UINT32_MAX
115
116
@@ -XXX,XX +XXX,XX @@ typedef enum {
117
118
#define TCG_TARGET_NB_REGS 32
119
120
-#ifdef __ARM_ARCH_EXT_IDIV__
121
-#define use_idiv_instructions 1
122
-#else
123
-extern bool use_idiv_instructions;
124
-#endif
125
-#ifdef __ARM_NEON__
126
-#define use_neon_instructions 1
127
-#else
128
-extern bool use_neon_instructions;
129
-#endif
130
-
131
-/* optional instructions */
132
-#define TCG_TARGET_HAS_ext8s_i32 1
133
-#define TCG_TARGET_HAS_ext16s_i32 1
134
-#define TCG_TARGET_HAS_ext8u_i32 0 /* and r0, r1, #0xff */
135
-#define TCG_TARGET_HAS_ext16u_i32 1
136
-#define TCG_TARGET_HAS_bswap16_i32 1
137
-#define TCG_TARGET_HAS_bswap32_i32 1
138
-#define TCG_TARGET_HAS_not_i32 1
139
-#define TCG_TARGET_HAS_rot_i32 1
140
-#define TCG_TARGET_HAS_andc_i32 1
141
-#define TCG_TARGET_HAS_orc_i32 0
142
-#define TCG_TARGET_HAS_eqv_i32 0
143
-#define TCG_TARGET_HAS_nand_i32 0
144
-#define TCG_TARGET_HAS_nor_i32 0
145
-#define TCG_TARGET_HAS_clz_i32 1
146
-#define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions
147
-#define TCG_TARGET_HAS_ctpop_i32 0
148
-#define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions
149
-#define TCG_TARGET_HAS_extract_i32 use_armv7_instructions
150
-#define TCG_TARGET_HAS_sextract_i32 use_armv7_instructions
151
-#define TCG_TARGET_HAS_extract2_i32 1
152
-#define TCG_TARGET_HAS_negsetcond_i32 1
153
-#define TCG_TARGET_HAS_mulu2_i32 1
154
-#define TCG_TARGET_HAS_muls2_i32 1
155
-#define TCG_TARGET_HAS_muluh_i32 0
156
-#define TCG_TARGET_HAS_mulsh_i32 0
157
-#define TCG_TARGET_HAS_div_i32 use_idiv_instructions
158
-#define TCG_TARGET_HAS_rem_i32 0
159
-#define TCG_TARGET_HAS_qemu_st8_i32 0
160
-
161
-#define TCG_TARGET_HAS_qemu_ldst_i128 0
162
-
163
-#define TCG_TARGET_HAS_tst 1
164
-
165
-#define TCG_TARGET_HAS_v64 use_neon_instructions
166
-#define TCG_TARGET_HAS_v128 use_neon_instructions
167
-#define TCG_TARGET_HAS_v256 0
168
-
169
-#define TCG_TARGET_HAS_andc_vec 1
170
-#define TCG_TARGET_HAS_orc_vec 1
171
-#define TCG_TARGET_HAS_nand_vec 0
172
-#define TCG_TARGET_HAS_nor_vec 0
173
-#define TCG_TARGET_HAS_eqv_vec 0
174
-#define TCG_TARGET_HAS_not_vec 1
175
-#define TCG_TARGET_HAS_neg_vec 1
176
-#define TCG_TARGET_HAS_abs_vec 1
177
-#define TCG_TARGET_HAS_roti_vec 0
178
-#define TCG_TARGET_HAS_rots_vec 0
179
-#define TCG_TARGET_HAS_rotv_vec 0
180
-#define TCG_TARGET_HAS_shi_vec 1
181
-#define TCG_TARGET_HAS_shs_vec 0
182
-#define TCG_TARGET_HAS_shv_vec 0
183
-#define TCG_TARGET_HAS_mul_vec 1
184
-#define TCG_TARGET_HAS_sat_vec 1
185
-#define TCG_TARGET_HAS_minmax_vec 1
186
-#define TCG_TARGET_HAS_bitsel_vec 1
187
-#define TCG_TARGET_HAS_cmpsel_vec 0
188
-#define TCG_TARGET_HAS_tst_vec 1
189
+#include "tcg-target-has.h"
190
191
#define TCG_TARGET_DEFAULT_MO (0)
192
193
--
194
2.43.0
195
196
diff view generated by jsdifflib
New patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Message-ID: <20250108215156.8731-6-philmd@linaro.org>
4
---
5
tcg/i386/tcg-target-has.h | 139 ++++++++++++++++++++++++++++++++++++++
6
tcg/i386/tcg-target.h | 129 +----------------------------------
7
2 files changed, 140 insertions(+), 128 deletions(-)
8
create mode 100644 tcg/i386/tcg-target-has.h
1
9
10
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
11
new file mode 100644
12
index XXXXXXX..XXXXXXX
13
--- /dev/null
14
+++ b/tcg/i386/tcg-target-has.h
15
@@ -XXX,XX +XXX,XX @@
16
+/* SPDX-License-Identifier: MIT */
17
+/*
18
+ * Define target-specific opcode support
19
+ * Copyright (c) 2008 Fabrice Bellard
20
+ */
21
+
22
+#ifndef TCG_TARGET_HAS_H
23
+#define TCG_TARGET_HAS_H
24
+
25
+#include "host/cpuinfo.h"
26
+
27
+#define have_bmi1 (cpuinfo & CPUINFO_BMI1)
28
+#define have_popcnt (cpuinfo & CPUINFO_POPCNT)
29
+#define have_avx1 (cpuinfo & CPUINFO_AVX1)
30
+#define have_avx2 (cpuinfo & CPUINFO_AVX2)
31
+#define have_movbe (cpuinfo & CPUINFO_MOVBE)
32
+
33
+/*
34
+ * There are interesting instructions in AVX512, so long as we have AVX512VL,
35
+ * which indicates support for EVEX on sizes smaller than 512 bits.
36
+ */
37
+#define have_avx512vl ((cpuinfo & CPUINFO_AVX512VL) && \
38
+ (cpuinfo & CPUINFO_AVX512F))
39
+#define have_avx512bw ((cpuinfo & CPUINFO_AVX512BW) && have_avx512vl)
40
+#define have_avx512dq ((cpuinfo & CPUINFO_AVX512DQ) && have_avx512vl)
41
+#define have_avx512vbmi2 ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl)
42
+
43
+/* optional instructions */
44
+#define TCG_TARGET_HAS_div2_i32 1
45
+#define TCG_TARGET_HAS_rot_i32 1
46
+#define TCG_TARGET_HAS_ext8s_i32 1
47
+#define TCG_TARGET_HAS_ext16s_i32 1
48
+#define TCG_TARGET_HAS_ext8u_i32 1
49
+#define TCG_TARGET_HAS_ext16u_i32 1
50
+#define TCG_TARGET_HAS_bswap16_i32 1
51
+#define TCG_TARGET_HAS_bswap32_i32 1
52
+#define TCG_TARGET_HAS_not_i32 1
53
+#define TCG_TARGET_HAS_andc_i32 have_bmi1
54
+#define TCG_TARGET_HAS_orc_i32 0
55
+#define TCG_TARGET_HAS_eqv_i32 0
56
+#define TCG_TARGET_HAS_nand_i32 0
57
+#define TCG_TARGET_HAS_nor_i32 0
58
+#define TCG_TARGET_HAS_clz_i32 1
59
+#define TCG_TARGET_HAS_ctz_i32 1
60
+#define TCG_TARGET_HAS_ctpop_i32 have_popcnt
61
+#define TCG_TARGET_HAS_deposit_i32 1
62
+#define TCG_TARGET_HAS_extract_i32 1
63
+#define TCG_TARGET_HAS_sextract_i32 1
64
+#define TCG_TARGET_HAS_extract2_i32 1
65
+#define TCG_TARGET_HAS_negsetcond_i32 1
66
+#define TCG_TARGET_HAS_add2_i32 1
67
+#define TCG_TARGET_HAS_sub2_i32 1
68
+#define TCG_TARGET_HAS_mulu2_i32 1
69
+#define TCG_TARGET_HAS_muls2_i32 1
70
+#define TCG_TARGET_HAS_muluh_i32 0
71
+#define TCG_TARGET_HAS_mulsh_i32 0
72
+
73
+#if TCG_TARGET_REG_BITS == 64
74
+/* Keep 32-bit values zero-extended in a register. */
75
+#define TCG_TARGET_HAS_extr_i64_i32 1
76
+#define TCG_TARGET_HAS_div2_i64 1
77
+#define TCG_TARGET_HAS_rot_i64 1
78
+#define TCG_TARGET_HAS_ext8s_i64 1
79
+#define TCG_TARGET_HAS_ext16s_i64 1
80
+#define TCG_TARGET_HAS_ext32s_i64 1
81
+#define TCG_TARGET_HAS_ext8u_i64 1
82
+#define TCG_TARGET_HAS_ext16u_i64 1
83
+#define TCG_TARGET_HAS_ext32u_i64 1
84
+#define TCG_TARGET_HAS_bswap16_i64 1
85
+#define TCG_TARGET_HAS_bswap32_i64 1
86
+#define TCG_TARGET_HAS_bswap64_i64 1
87
+#define TCG_TARGET_HAS_not_i64 1
88
+#define TCG_TARGET_HAS_andc_i64 have_bmi1
89
+#define TCG_TARGET_HAS_orc_i64 0
90
+#define TCG_TARGET_HAS_eqv_i64 0
91
+#define TCG_TARGET_HAS_nand_i64 0
92
+#define TCG_TARGET_HAS_nor_i64 0
93
+#define TCG_TARGET_HAS_clz_i64 1
94
+#define TCG_TARGET_HAS_ctz_i64 1
95
+#define TCG_TARGET_HAS_ctpop_i64 have_popcnt
96
+#define TCG_TARGET_HAS_deposit_i64 1
97
+#define TCG_TARGET_HAS_extract_i64 1
98
+#define TCG_TARGET_HAS_sextract_i64 0
99
+#define TCG_TARGET_HAS_extract2_i64 1
100
+#define TCG_TARGET_HAS_negsetcond_i64 1
101
+#define TCG_TARGET_HAS_add2_i64 1
102
+#define TCG_TARGET_HAS_sub2_i64 1
103
+#define TCG_TARGET_HAS_mulu2_i64 1
104
+#define TCG_TARGET_HAS_muls2_i64 1
105
+#define TCG_TARGET_HAS_muluh_i64 0
106
+#define TCG_TARGET_HAS_mulsh_i64 0
107
+#define TCG_TARGET_HAS_qemu_st8_i32 0
108
+#else
109
+#define TCG_TARGET_HAS_qemu_st8_i32 1
110
+#endif
111
+
112
+#define TCG_TARGET_HAS_qemu_ldst_i128 \
113
+ (TCG_TARGET_REG_BITS == 64 && (cpuinfo & CPUINFO_ATOMIC_VMOVDQA))
114
+
115
+#define TCG_TARGET_HAS_tst 1
116
+
117
+/* We do not support older SSE systems, only beginning with AVX1. */
118
+#define TCG_TARGET_HAS_v64 have_avx1
119
+#define TCG_TARGET_HAS_v128 have_avx1
120
+#define TCG_TARGET_HAS_v256 have_avx2
121
+
122
+#define TCG_TARGET_HAS_andc_vec 1
123
+#define TCG_TARGET_HAS_orc_vec have_avx512vl
124
+#define TCG_TARGET_HAS_nand_vec have_avx512vl
125
+#define TCG_TARGET_HAS_nor_vec have_avx512vl
126
+#define TCG_TARGET_HAS_eqv_vec have_avx512vl
127
+#define TCG_TARGET_HAS_not_vec have_avx512vl
128
+#define TCG_TARGET_HAS_neg_vec 0
129
+#define TCG_TARGET_HAS_abs_vec 1
130
+#define TCG_TARGET_HAS_roti_vec have_avx512vl
131
+#define TCG_TARGET_HAS_rots_vec 0
132
+#define TCG_TARGET_HAS_rotv_vec have_avx512vl
133
+#define TCG_TARGET_HAS_shi_vec 1
134
+#define TCG_TARGET_HAS_shs_vec 1
135
+#define TCG_TARGET_HAS_shv_vec have_avx2
136
+#define TCG_TARGET_HAS_mul_vec 1
137
+#define TCG_TARGET_HAS_sat_vec 1
138
+#define TCG_TARGET_HAS_minmax_vec 1
139
+#define TCG_TARGET_HAS_bitsel_vec have_avx512vl
140
+#define TCG_TARGET_HAS_cmpsel_vec 1
141
+#define TCG_TARGET_HAS_tst_vec have_avx512bw
142
+
143
+#define TCG_TARGET_deposit_i32_valid(ofs, len) \
144
+ (((ofs) == 0 && ((len) == 8 || (len) == 16)) || \
145
+ (TCG_TARGET_REG_BITS == 32 && (ofs) == 8 && (len) == 8))
146
+#define TCG_TARGET_deposit_i64_valid TCG_TARGET_deposit_i32_valid
147
+
148
+/* Check for the possibility of high-byte extraction and, for 64-bit,
149
+ zero-extending 32-bit right-shift. */
150
+#define TCG_TARGET_extract_i32_valid(ofs, len) ((ofs) == 8 && (len) == 8)
151
+#define TCG_TARGET_extract_i64_valid(ofs, len) \
152
+ (((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32)
153
+
154
+#endif
155
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
156
index XXXXXXX..XXXXXXX 100644
157
--- a/tcg/i386/tcg-target.h
158
+++ b/tcg/i386/tcg-target.h
159
@@ -XXX,XX +XXX,XX @@
160
#ifndef I386_TCG_TARGET_H
161
#define I386_TCG_TARGET_H
162
163
-#include "host/cpuinfo.h"
164
-
165
#define TCG_TARGET_INSN_UNIT_SIZE 1
166
167
#ifdef __x86_64__
168
@@ -XXX,XX +XXX,XX @@ typedef enum {
169
TCG_REG_CALL_STACK = TCG_REG_ESP
170
} TCGReg;
171
172
-#define have_bmi1 (cpuinfo & CPUINFO_BMI1)
173
-#define have_popcnt (cpuinfo & CPUINFO_POPCNT)
174
-#define have_avx1 (cpuinfo & CPUINFO_AVX1)
175
-#define have_avx2 (cpuinfo & CPUINFO_AVX2)
176
-#define have_movbe (cpuinfo & CPUINFO_MOVBE)
177
-
178
-/*
179
- * There are interesting instructions in AVX512, so long as we have AVX512VL,
180
- * which indicates support for EVEX on sizes smaller than 512 bits.
181
- */
182
-#define have_avx512vl ((cpuinfo & CPUINFO_AVX512VL) && \
183
- (cpuinfo & CPUINFO_AVX512F))
184
-#define have_avx512bw ((cpuinfo & CPUINFO_AVX512BW) && have_avx512vl)
185
-#define have_avx512dq ((cpuinfo & CPUINFO_AVX512DQ) && have_avx512vl)
186
-#define have_avx512vbmi2 ((cpuinfo & CPUINFO_AVX512VBMI2) && have_avx512vl)
187
-
188
-/* optional instructions */
189
-#define TCG_TARGET_HAS_div2_i32 1
190
-#define TCG_TARGET_HAS_rot_i32 1
191
-#define TCG_TARGET_HAS_ext8s_i32 1
192
-#define TCG_TARGET_HAS_ext16s_i32 1
193
-#define TCG_TARGET_HAS_ext8u_i32 1
194
-#define TCG_TARGET_HAS_ext16u_i32 1
195
-#define TCG_TARGET_HAS_bswap16_i32 1
196
-#define TCG_TARGET_HAS_bswap32_i32 1
197
-#define TCG_TARGET_HAS_not_i32 1
198
-#define TCG_TARGET_HAS_andc_i32 have_bmi1
199
-#define TCG_TARGET_HAS_orc_i32 0
200
-#define TCG_TARGET_HAS_eqv_i32 0
201
-#define TCG_TARGET_HAS_nand_i32 0
202
-#define TCG_TARGET_HAS_nor_i32 0
203
-#define TCG_TARGET_HAS_clz_i32 1
204
-#define TCG_TARGET_HAS_ctz_i32 1
205
-#define TCG_TARGET_HAS_ctpop_i32 have_popcnt
206
-#define TCG_TARGET_HAS_deposit_i32 1
207
-#define TCG_TARGET_HAS_extract_i32 1
208
-#define TCG_TARGET_HAS_sextract_i32 1
209
-#define TCG_TARGET_HAS_extract2_i32 1
210
-#define TCG_TARGET_HAS_negsetcond_i32 1
211
-#define TCG_TARGET_HAS_add2_i32 1
212
-#define TCG_TARGET_HAS_sub2_i32 1
213
-#define TCG_TARGET_HAS_mulu2_i32 1
214
-#define TCG_TARGET_HAS_muls2_i32 1
215
-#define TCG_TARGET_HAS_muluh_i32 0
216
-#define TCG_TARGET_HAS_mulsh_i32 0
217
-
218
-#if TCG_TARGET_REG_BITS == 64
219
-/* Keep 32-bit values zero-extended in a register. */
220
-#define TCG_TARGET_HAS_extr_i64_i32 1
221
-#define TCG_TARGET_HAS_div2_i64 1
222
-#define TCG_TARGET_HAS_rot_i64 1
223
-#define TCG_TARGET_HAS_ext8s_i64 1
224
-#define TCG_TARGET_HAS_ext16s_i64 1
225
-#define TCG_TARGET_HAS_ext32s_i64 1
226
-#define TCG_TARGET_HAS_ext8u_i64 1
227
-#define TCG_TARGET_HAS_ext16u_i64 1
228
-#define TCG_TARGET_HAS_ext32u_i64 1
229
-#define TCG_TARGET_HAS_bswap16_i64 1
230
-#define TCG_TARGET_HAS_bswap32_i64 1
231
-#define TCG_TARGET_HAS_bswap64_i64 1
232
-#define TCG_TARGET_HAS_not_i64 1
233
-#define TCG_TARGET_HAS_andc_i64 have_bmi1
234
-#define TCG_TARGET_HAS_orc_i64 0
235
-#define TCG_TARGET_HAS_eqv_i64 0
236
-#define TCG_TARGET_HAS_nand_i64 0
237
-#define TCG_TARGET_HAS_nor_i64 0
238
-#define TCG_TARGET_HAS_clz_i64 1
239
-#define TCG_TARGET_HAS_ctz_i64 1
240
-#define TCG_TARGET_HAS_ctpop_i64 have_popcnt
241
-#define TCG_TARGET_HAS_deposit_i64 1
242
-#define TCG_TARGET_HAS_extract_i64 1
243
-#define TCG_TARGET_HAS_sextract_i64 0
244
-#define TCG_TARGET_HAS_extract2_i64 1
245
-#define TCG_TARGET_HAS_negsetcond_i64 1
246
-#define TCG_TARGET_HAS_add2_i64 1
247
-#define TCG_TARGET_HAS_sub2_i64 1
248
-#define TCG_TARGET_HAS_mulu2_i64 1
249
-#define TCG_TARGET_HAS_muls2_i64 1
250
-#define TCG_TARGET_HAS_muluh_i64 0
251
-#define TCG_TARGET_HAS_mulsh_i64 0
252
-#define TCG_TARGET_HAS_qemu_st8_i32 0
253
-#else
254
-#define TCG_TARGET_HAS_qemu_st8_i32 1
255
-#endif
256
-
257
-#define TCG_TARGET_HAS_qemu_ldst_i128 \
258
- (TCG_TARGET_REG_BITS == 64 && (cpuinfo & CPUINFO_ATOMIC_VMOVDQA))
259
-
260
-#define TCG_TARGET_HAS_tst 1
261
-
262
-/* We do not support older SSE systems, only beginning with AVX1. */
263
-#define TCG_TARGET_HAS_v64 have_avx1
264
-#define TCG_TARGET_HAS_v128 have_avx1
265
-#define TCG_TARGET_HAS_v256 have_avx2
266
-
267
-#define TCG_TARGET_HAS_andc_vec 1
268
-#define TCG_TARGET_HAS_orc_vec have_avx512vl
269
-#define TCG_TARGET_HAS_nand_vec have_avx512vl
270
-#define TCG_TARGET_HAS_nor_vec have_avx512vl
271
-#define TCG_TARGET_HAS_eqv_vec have_avx512vl
272
-#define TCG_TARGET_HAS_not_vec have_avx512vl
273
-#define TCG_TARGET_HAS_neg_vec 0
274
-#define TCG_TARGET_HAS_abs_vec 1
275
-#define TCG_TARGET_HAS_roti_vec have_avx512vl
276
-#define TCG_TARGET_HAS_rots_vec 0
277
-#define TCG_TARGET_HAS_rotv_vec have_avx512vl
278
-#define TCG_TARGET_HAS_shi_vec 1
279
-#define TCG_TARGET_HAS_shs_vec 1
280
-#define TCG_TARGET_HAS_shv_vec have_avx2
281
-#define TCG_TARGET_HAS_mul_vec 1
282
-#define TCG_TARGET_HAS_sat_vec 1
283
-#define TCG_TARGET_HAS_minmax_vec 1
284
-#define TCG_TARGET_HAS_bitsel_vec have_avx512vl
285
-#define TCG_TARGET_HAS_cmpsel_vec 1
286
-#define TCG_TARGET_HAS_tst_vec have_avx512bw
287
-
288
-#define TCG_TARGET_deposit_i32_valid(ofs, len) \
289
- (((ofs) == 0 && ((len) == 8 || (len) == 16)) || \
290
- (TCG_TARGET_REG_BITS == 32 && (ofs) == 8 && (len) == 8))
291
-#define TCG_TARGET_deposit_i64_valid TCG_TARGET_deposit_i32_valid
292
-
293
-/* Check for the possibility of high-byte extraction and, for 64-bit,
294
- zero-extending 32-bit right-shift. */
295
-#define TCG_TARGET_extract_i32_valid(ofs, len) ((ofs) == 8 && (len) == 8)
296
-#define TCG_TARGET_extract_i64_valid(ofs, len) \
297
- (((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32)
298
+#include "tcg-target-has.h"
299
300
/* This defines the natural memory order supported by this
301
* architecture before guarantees made by various barrier
302
--
303
2.43.0
304
305
diff view generated by jsdifflib
New patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Message-ID: <20250108215156.8731-7-philmd@linaro.org>
4
---
5
tcg/loongarch64/tcg-target-has.h | 113 +++++++++++++++++++++++++++++++
6
tcg/loongarch64/tcg-target.h | 102 +---------------------------
7
2 files changed, 114 insertions(+), 101 deletions(-)
8
create mode 100644 tcg/loongarch64/tcg-target-has.h
1
9
10
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
11
new file mode 100644
12
index XXXXXXX..XXXXXXX
13
--- /dev/null
14
+++ b/tcg/loongarch64/tcg-target-has.h
15
@@ -XXX,XX +XXX,XX @@
16
+/* SPDX-License-Identifier: MIT */
17
+/*
18
+ * Define target-specific opcode support
19
+ * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
20
+ */
21
+
22
+#ifndef TCG_TARGET_HAS_H
23
+#define TCG_TARGET_HAS_H
24
+
25
+#include "host/cpuinfo.h"
26
+
27
+/* optional instructions */
28
+#define TCG_TARGET_HAS_negsetcond_i32 0
29
+#define TCG_TARGET_HAS_div_i32 1
30
+#define TCG_TARGET_HAS_rem_i32 1
31
+#define TCG_TARGET_HAS_div2_i32 0
32
+#define TCG_TARGET_HAS_rot_i32 1
33
+#define TCG_TARGET_HAS_deposit_i32 1
34
+#define TCG_TARGET_HAS_extract_i32 1
35
+#define TCG_TARGET_HAS_sextract_i32 0
36
+#define TCG_TARGET_HAS_extract2_i32 0
37
+#define TCG_TARGET_HAS_add2_i32 0
38
+#define TCG_TARGET_HAS_sub2_i32 0
39
+#define TCG_TARGET_HAS_mulu2_i32 0
40
+#define TCG_TARGET_HAS_muls2_i32 0
41
+#define TCG_TARGET_HAS_muluh_i32 1
42
+#define TCG_TARGET_HAS_mulsh_i32 1
43
+#define TCG_TARGET_HAS_ext8s_i32 1
44
+#define TCG_TARGET_HAS_ext16s_i32 1
45
+#define TCG_TARGET_HAS_ext8u_i32 1
46
+#define TCG_TARGET_HAS_ext16u_i32 1
47
+#define TCG_TARGET_HAS_bswap16_i32 1
48
+#define TCG_TARGET_HAS_bswap32_i32 1
49
+#define TCG_TARGET_HAS_not_i32 1
50
+#define TCG_TARGET_HAS_andc_i32 1
51
+#define TCG_TARGET_HAS_orc_i32 1
52
+#define TCG_TARGET_HAS_eqv_i32 0
53
+#define TCG_TARGET_HAS_nand_i32 0
54
+#define TCG_TARGET_HAS_nor_i32 1
55
+#define TCG_TARGET_HAS_clz_i32 1
56
+#define TCG_TARGET_HAS_ctz_i32 1
57
+#define TCG_TARGET_HAS_ctpop_i32 0
58
+#define TCG_TARGET_HAS_brcond2 0
59
+#define TCG_TARGET_HAS_setcond2 0
60
+#define TCG_TARGET_HAS_qemu_st8_i32 0
61
+
62
+/* 64-bit operations */
63
+#define TCG_TARGET_HAS_negsetcond_i64 0
64
+#define TCG_TARGET_HAS_div_i64 1
65
+#define TCG_TARGET_HAS_rem_i64 1
66
+#define TCG_TARGET_HAS_div2_i64 0
67
+#define TCG_TARGET_HAS_rot_i64 1
68
+#define TCG_TARGET_HAS_deposit_i64 1
69
+#define TCG_TARGET_HAS_extract_i64 1
70
+#define TCG_TARGET_HAS_sextract_i64 0
71
+#define TCG_TARGET_HAS_extract2_i64 0
72
+#define TCG_TARGET_HAS_extr_i64_i32 1
73
+#define TCG_TARGET_HAS_ext8s_i64 1
74
+#define TCG_TARGET_HAS_ext16s_i64 1
75
+#define TCG_TARGET_HAS_ext32s_i64 1
76
+#define TCG_TARGET_HAS_ext8u_i64 1
77
+#define TCG_TARGET_HAS_ext16u_i64 1
78
+#define TCG_TARGET_HAS_ext32u_i64 1
79
+#define TCG_TARGET_HAS_bswap16_i64 1
80
+#define TCG_TARGET_HAS_bswap32_i64 1
81
+#define TCG_TARGET_HAS_bswap64_i64 1
82
+#define TCG_TARGET_HAS_not_i64 1
83
+#define TCG_TARGET_HAS_andc_i64 1
84
+#define TCG_TARGET_HAS_orc_i64 1
85
+#define TCG_TARGET_HAS_eqv_i64 0
86
+#define TCG_TARGET_HAS_nand_i64 0
87
+#define TCG_TARGET_HAS_nor_i64 1
88
+#define TCG_TARGET_HAS_clz_i64 1
89
+#define TCG_TARGET_HAS_ctz_i64 1
90
+#define TCG_TARGET_HAS_ctpop_i64 0
91
+#define TCG_TARGET_HAS_add2_i64 0
92
+#define TCG_TARGET_HAS_sub2_i64 0
93
+#define TCG_TARGET_HAS_mulu2_i64 0
94
+#define TCG_TARGET_HAS_muls2_i64 0
95
+#define TCG_TARGET_HAS_muluh_i64 1
96
+#define TCG_TARGET_HAS_mulsh_i64 1
97
+
98
+#define TCG_TARGET_HAS_qemu_ldst_i128 (cpuinfo & CPUINFO_LSX)
99
+
100
+#define TCG_TARGET_HAS_tst 0
101
+
102
+#define TCG_TARGET_HAS_v64 (cpuinfo & CPUINFO_LSX)
103
+#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_LSX)
104
+#define TCG_TARGET_HAS_v256 (cpuinfo & CPUINFO_LASX)
105
+
106
+#define TCG_TARGET_HAS_not_vec 1
107
+#define TCG_TARGET_HAS_neg_vec 1
108
+#define TCG_TARGET_HAS_abs_vec 0
109
+#define TCG_TARGET_HAS_andc_vec 1
110
+#define TCG_TARGET_HAS_orc_vec 1
111
+#define TCG_TARGET_HAS_nand_vec 0
112
+#define TCG_TARGET_HAS_nor_vec 1
113
+#define TCG_TARGET_HAS_eqv_vec 0
114
+#define TCG_TARGET_HAS_mul_vec 1
115
+#define TCG_TARGET_HAS_shi_vec 1
116
+#define TCG_TARGET_HAS_shs_vec 0
117
+#define TCG_TARGET_HAS_shv_vec 1
118
+#define TCG_TARGET_HAS_roti_vec 1
119
+#define TCG_TARGET_HAS_rots_vec 0
120
+#define TCG_TARGET_HAS_rotv_vec 1
121
+#define TCG_TARGET_HAS_sat_vec 1
122
+#define TCG_TARGET_HAS_minmax_vec 1
123
+#define TCG_TARGET_HAS_bitsel_vec 1
124
+#define TCG_TARGET_HAS_cmpsel_vec 0
125
+#define TCG_TARGET_HAS_tst_vec 0
126
+
127
+
128
+#endif
129
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
130
index XXXXXXX..XXXXXXX 100644
131
--- a/tcg/loongarch64/tcg-target.h
132
+++ b/tcg/loongarch64/tcg-target.h
133
@@ -XXX,XX +XXX,XX @@
134
#ifndef LOONGARCH_TCG_TARGET_H
135
#define LOONGARCH_TCG_TARGET_H
136
137
-#include "host/cpuinfo.h"
138
-
139
#define TCG_TARGET_INSN_UNIT_SIZE 4
140
#define TCG_TARGET_NB_REGS 64
141
142
@@ -XXX,XX +XXX,XX @@ typedef enum {
143
TCG_VEC_TMP0 = TCG_REG_V23,
144
} TCGReg;
145
146
-/* optional instructions */
147
-#define TCG_TARGET_HAS_negsetcond_i32 0
148
-#define TCG_TARGET_HAS_div_i32 1
149
-#define TCG_TARGET_HAS_rem_i32 1
150
-#define TCG_TARGET_HAS_div2_i32 0
151
-#define TCG_TARGET_HAS_rot_i32 1
152
-#define TCG_TARGET_HAS_deposit_i32 1
153
-#define TCG_TARGET_HAS_extract_i32 1
154
-#define TCG_TARGET_HAS_sextract_i32 0
155
-#define TCG_TARGET_HAS_extract2_i32 0
156
-#define TCG_TARGET_HAS_add2_i32 0
157
-#define TCG_TARGET_HAS_sub2_i32 0
158
-#define TCG_TARGET_HAS_mulu2_i32 0
159
-#define TCG_TARGET_HAS_muls2_i32 0
160
-#define TCG_TARGET_HAS_muluh_i32 1
161
-#define TCG_TARGET_HAS_mulsh_i32 1
162
-#define TCG_TARGET_HAS_ext8s_i32 1
163
-#define TCG_TARGET_HAS_ext16s_i32 1
164
-#define TCG_TARGET_HAS_ext8u_i32 1
165
-#define TCG_TARGET_HAS_ext16u_i32 1
166
-#define TCG_TARGET_HAS_bswap16_i32 1
167
-#define TCG_TARGET_HAS_bswap32_i32 1
168
-#define TCG_TARGET_HAS_not_i32 1
169
-#define TCG_TARGET_HAS_andc_i32 1
170
-#define TCG_TARGET_HAS_orc_i32 1
171
-#define TCG_TARGET_HAS_eqv_i32 0
172
-#define TCG_TARGET_HAS_nand_i32 0
173
-#define TCG_TARGET_HAS_nor_i32 1
174
-#define TCG_TARGET_HAS_clz_i32 1
175
-#define TCG_TARGET_HAS_ctz_i32 1
176
-#define TCG_TARGET_HAS_ctpop_i32 0
177
-#define TCG_TARGET_HAS_brcond2 0
178
-#define TCG_TARGET_HAS_setcond2 0
179
-#define TCG_TARGET_HAS_qemu_st8_i32 0
180
-
181
-/* 64-bit operations */
182
-#define TCG_TARGET_HAS_negsetcond_i64 0
183
-#define TCG_TARGET_HAS_div_i64 1
184
-#define TCG_TARGET_HAS_rem_i64 1
185
-#define TCG_TARGET_HAS_div2_i64 0
186
-#define TCG_TARGET_HAS_rot_i64 1
187
-#define TCG_TARGET_HAS_deposit_i64 1
188
-#define TCG_TARGET_HAS_extract_i64 1
189
-#define TCG_TARGET_HAS_sextract_i64 0
190
-#define TCG_TARGET_HAS_extract2_i64 0
191
-#define TCG_TARGET_HAS_extr_i64_i32 1
192
-#define TCG_TARGET_HAS_ext8s_i64 1
193
-#define TCG_TARGET_HAS_ext16s_i64 1
194
-#define TCG_TARGET_HAS_ext32s_i64 1
195
-#define TCG_TARGET_HAS_ext8u_i64 1
196
-#define TCG_TARGET_HAS_ext16u_i64 1
197
-#define TCG_TARGET_HAS_ext32u_i64 1
198
-#define TCG_TARGET_HAS_bswap16_i64 1
199
-#define TCG_TARGET_HAS_bswap32_i64 1
200
-#define TCG_TARGET_HAS_bswap64_i64 1
201
-#define TCG_TARGET_HAS_not_i64 1
202
-#define TCG_TARGET_HAS_andc_i64 1
203
-#define TCG_TARGET_HAS_orc_i64 1
204
-#define TCG_TARGET_HAS_eqv_i64 0
205
-#define TCG_TARGET_HAS_nand_i64 0
206
-#define TCG_TARGET_HAS_nor_i64 1
207
-#define TCG_TARGET_HAS_clz_i64 1
208
-#define TCG_TARGET_HAS_ctz_i64 1
209
-#define TCG_TARGET_HAS_ctpop_i64 0
210
-#define TCG_TARGET_HAS_add2_i64 0
211
-#define TCG_TARGET_HAS_sub2_i64 0
212
-#define TCG_TARGET_HAS_mulu2_i64 0
213
-#define TCG_TARGET_HAS_muls2_i64 0
214
-#define TCG_TARGET_HAS_muluh_i64 1
215
-#define TCG_TARGET_HAS_mulsh_i64 1
216
-
217
-#define TCG_TARGET_HAS_qemu_ldst_i128 (cpuinfo & CPUINFO_LSX)
218
-
219
-#define TCG_TARGET_HAS_tst 0
220
-
221
-#define TCG_TARGET_HAS_v64 (cpuinfo & CPUINFO_LSX)
222
-#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_LSX)
223
-#define TCG_TARGET_HAS_v256 (cpuinfo & CPUINFO_LASX)
224
-
225
-#define TCG_TARGET_HAS_not_vec 1
226
-#define TCG_TARGET_HAS_neg_vec 1
227
-#define TCG_TARGET_HAS_abs_vec 0
228
-#define TCG_TARGET_HAS_andc_vec 1
229
-#define TCG_TARGET_HAS_orc_vec 1
230
-#define TCG_TARGET_HAS_nand_vec 0
231
-#define TCG_TARGET_HAS_nor_vec 1
232
-#define TCG_TARGET_HAS_eqv_vec 0
233
-#define TCG_TARGET_HAS_mul_vec 1
234
-#define TCG_TARGET_HAS_shi_vec 1
235
-#define TCG_TARGET_HAS_shs_vec 0
236
-#define TCG_TARGET_HAS_shv_vec 1
237
-#define TCG_TARGET_HAS_roti_vec 1
238
-#define TCG_TARGET_HAS_rots_vec 0
239
-#define TCG_TARGET_HAS_rotv_vec 1
240
-#define TCG_TARGET_HAS_sat_vec 1
241
-#define TCG_TARGET_HAS_minmax_vec 1
242
-#define TCG_TARGET_HAS_bitsel_vec 1
243
-#define TCG_TARGET_HAS_cmpsel_vec 0
244
-#define TCG_TARGET_HAS_tst_vec 0
245
+#include "tcg-target-has.h"
246
247
#define TCG_TARGET_DEFAULT_MO (0)
248
249
--
250
2.43.0
251
252
diff view generated by jsdifflib
New patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Message-ID: <20250108215156.8731-8-philmd@linaro.org>
4
---
5
tcg/mips/tcg-target-has.h | 122 ++++++++++++++++++++++++++++++++++++++
6
tcg/mips/tcg-target.h | 112 +---------------------------------
7
2 files changed, 123 insertions(+), 111 deletions(-)
8
create mode 100644 tcg/mips/tcg-target-has.h
1
9
10
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
11
new file mode 100644
12
index XXXXXXX..XXXXXXX
13
--- /dev/null
14
+++ b/tcg/mips/tcg-target-has.h
15
@@ -XXX,XX +XXX,XX @@
16
+/* SPDX-License-Identifier: MIT */
17
+/*
18
+ * Define target-specific opcode support
19
+ * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
20
+ * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
21
+ */
22
+
23
+#ifndef TCG_TARGET_HAS_H
24
+#define TCG_TARGET_HAS_H
25
+
26
+/* MOVN/MOVZ instructions detection */
27
+#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 1)) || \
28
+ defined(_MIPS_ARCH_LOONGSON2E) || defined(_MIPS_ARCH_LOONGSON2F) || \
29
+ defined(_MIPS_ARCH_MIPS4)
30
+#define use_movnz_instructions 1
31
+#else
32
+extern bool use_movnz_instructions;
33
+#endif
34
+
35
+/* MIPS32 instruction set detection */
36
+#if defined(__mips_isa_rev) && (__mips_isa_rev >= 1)
37
+#define use_mips32_instructions 1
38
+#else
39
+extern bool use_mips32_instructions;
40
+#endif
41
+
42
+/* MIPS32R2 instruction set detection */
43
+#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
44
+#define use_mips32r2_instructions 1
45
+#else
46
+extern bool use_mips32r2_instructions;
47
+#endif
48
+
49
+/* MIPS32R6 instruction set detection */
50
+#if defined(__mips_isa_rev) && (__mips_isa_rev >= 6)
51
+#define use_mips32r6_instructions 1
52
+#else
53
+#define use_mips32r6_instructions 0
54
+#endif
55
+
56
+/* optional instructions */
57
+#define TCG_TARGET_HAS_div_i32 1
58
+#define TCG_TARGET_HAS_rem_i32 1
59
+#define TCG_TARGET_HAS_not_i32 1
60
+#define TCG_TARGET_HAS_nor_i32 1
61
+#define TCG_TARGET_HAS_andc_i32 0
62
+#define TCG_TARGET_HAS_orc_i32 0
63
+#define TCG_TARGET_HAS_eqv_i32 0
64
+#define TCG_TARGET_HAS_nand_i32 0
65
+#define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions)
66
+#define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions)
67
+#define TCG_TARGET_HAS_muluh_i32 1
68
+#define TCG_TARGET_HAS_mulsh_i32 1
69
+#define TCG_TARGET_HAS_bswap32_i32 1
70
+#define TCG_TARGET_HAS_negsetcond_i32 0
71
+
72
+#if TCG_TARGET_REG_BITS == 64
73
+#define TCG_TARGET_HAS_add2_i32 0
74
+#define TCG_TARGET_HAS_sub2_i32 0
75
+#define TCG_TARGET_HAS_extr_i64_i32 1
76
+#define TCG_TARGET_HAS_div_i64 1
77
+#define TCG_TARGET_HAS_rem_i64 1
78
+#define TCG_TARGET_HAS_not_i64 1
79
+#define TCG_TARGET_HAS_nor_i64 1
80
+#define TCG_TARGET_HAS_andc_i64 0
81
+#define TCG_TARGET_HAS_orc_i64 0
82
+#define TCG_TARGET_HAS_eqv_i64 0
83
+#define TCG_TARGET_HAS_nand_i64 0
84
+#define TCG_TARGET_HAS_add2_i64 0
85
+#define TCG_TARGET_HAS_sub2_i64 0
86
+#define TCG_TARGET_HAS_mulu2_i64 (!use_mips32r6_instructions)
87
+#define TCG_TARGET_HAS_muls2_i64 (!use_mips32r6_instructions)
88
+#define TCG_TARGET_HAS_muluh_i64 1
89
+#define TCG_TARGET_HAS_mulsh_i64 1
90
+#define TCG_TARGET_HAS_ext32s_i64 1
91
+#define TCG_TARGET_HAS_ext32u_i64 1
92
+#define TCG_TARGET_HAS_negsetcond_i64 0
93
+#endif
94
+
95
+/* optional instructions detected at runtime */
96
+#define TCG_TARGET_HAS_bswap16_i32 use_mips32r2_instructions
97
+#define TCG_TARGET_HAS_deposit_i32 use_mips32r2_instructions
98
+#define TCG_TARGET_HAS_extract_i32 use_mips32r2_instructions
99
+#define TCG_TARGET_HAS_sextract_i32 0
100
+#define TCG_TARGET_HAS_extract2_i32 0
101
+#define TCG_TARGET_HAS_ext8s_i32 use_mips32r2_instructions
102
+#define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions
103
+#define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions
104
+#define TCG_TARGET_HAS_clz_i32 use_mips32r2_instructions
105
+#define TCG_TARGET_HAS_ctz_i32 0
106
+#define TCG_TARGET_HAS_ctpop_i32 0
107
+#define TCG_TARGET_HAS_qemu_st8_i32 0
108
+
109
+#if TCG_TARGET_REG_BITS == 64
110
+#define TCG_TARGET_HAS_bswap16_i64 use_mips32r2_instructions
111
+#define TCG_TARGET_HAS_bswap32_i64 use_mips32r2_instructions
112
+#define TCG_TARGET_HAS_bswap64_i64 use_mips32r2_instructions
113
+#define TCG_TARGET_HAS_deposit_i64 use_mips32r2_instructions
114
+#define TCG_TARGET_HAS_extract_i64 use_mips32r2_instructions
115
+#define TCG_TARGET_HAS_sextract_i64 0
116
+#define TCG_TARGET_HAS_extract2_i64 0
117
+#define TCG_TARGET_HAS_ext8s_i64 use_mips32r2_instructions
118
+#define TCG_TARGET_HAS_ext16s_i64 use_mips32r2_instructions
119
+#define TCG_TARGET_HAS_rot_i64 use_mips32r2_instructions
120
+#define TCG_TARGET_HAS_clz_i64 use_mips32r2_instructions
121
+#define TCG_TARGET_HAS_ctz_i64 0
122
+#define TCG_TARGET_HAS_ctpop_i64 0
123
+#endif
124
+
125
+/* optional instructions automatically implemented */
126
+#define TCG_TARGET_HAS_ext8u_i32 0 /* andi rt, rs, 0xff */
127
+#define TCG_TARGET_HAS_ext16u_i32 0 /* andi rt, rs, 0xffff */
128
+
129
+#if TCG_TARGET_REG_BITS == 64
130
+#define TCG_TARGET_HAS_ext8u_i64 0 /* andi rt, rs, 0xff */
131
+#define TCG_TARGET_HAS_ext16u_i64 0 /* andi rt, rs, 0xffff */
132
+#endif
133
+
134
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
135
+#define TCG_TARGET_HAS_tst 0
136
+
137
+#endif
138
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
139
index XXXXXXX..XXXXXXX 100644
140
--- a/tcg/mips/tcg-target.h
141
+++ b/tcg/mips/tcg-target.h
142
@@ -XXX,XX +XXX,XX @@ typedef enum {
143
TCG_AREG0 = TCG_REG_S8,
144
} TCGReg;
145
146
-/* MOVN/MOVZ instructions detection */
147
-#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 1)) || \
148
- defined(_MIPS_ARCH_LOONGSON2E) || defined(_MIPS_ARCH_LOONGSON2F) || \
149
- defined(_MIPS_ARCH_MIPS4)
150
-#define use_movnz_instructions 1
151
-#else
152
-extern bool use_movnz_instructions;
153
-#endif
154
-
155
-/* MIPS32 instruction set detection */
156
-#if defined(__mips_isa_rev) && (__mips_isa_rev >= 1)
157
-#define use_mips32_instructions 1
158
-#else
159
-extern bool use_mips32_instructions;
160
-#endif
161
-
162
-/* MIPS32R2 instruction set detection */
163
-#if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
164
-#define use_mips32r2_instructions 1
165
-#else
166
-extern bool use_mips32r2_instructions;
167
-#endif
168
-
169
-/* MIPS32R6 instruction set detection */
170
-#if defined(__mips_isa_rev) && (__mips_isa_rev >= 6)
171
-#define use_mips32r6_instructions 1
172
-#else
173
-#define use_mips32r6_instructions 0
174
-#endif
175
-
176
-/* optional instructions */
177
-#define TCG_TARGET_HAS_div_i32 1
178
-#define TCG_TARGET_HAS_rem_i32 1
179
-#define TCG_TARGET_HAS_not_i32 1
180
-#define TCG_TARGET_HAS_nor_i32 1
181
-#define TCG_TARGET_HAS_andc_i32 0
182
-#define TCG_TARGET_HAS_orc_i32 0
183
-#define TCG_TARGET_HAS_eqv_i32 0
184
-#define TCG_TARGET_HAS_nand_i32 0
185
-#define TCG_TARGET_HAS_mulu2_i32 (!use_mips32r6_instructions)
186
-#define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions)
187
-#define TCG_TARGET_HAS_muluh_i32 1
188
-#define TCG_TARGET_HAS_mulsh_i32 1
189
-#define TCG_TARGET_HAS_bswap32_i32 1
190
-#define TCG_TARGET_HAS_negsetcond_i32 0
191
-
192
-#if TCG_TARGET_REG_BITS == 64
193
-#define TCG_TARGET_HAS_add2_i32 0
194
-#define TCG_TARGET_HAS_sub2_i32 0
195
-#define TCG_TARGET_HAS_extr_i64_i32 1
196
-#define TCG_TARGET_HAS_div_i64 1
197
-#define TCG_TARGET_HAS_rem_i64 1
198
-#define TCG_TARGET_HAS_not_i64 1
199
-#define TCG_TARGET_HAS_nor_i64 1
200
-#define TCG_TARGET_HAS_andc_i64 0
201
-#define TCG_TARGET_HAS_orc_i64 0
202
-#define TCG_TARGET_HAS_eqv_i64 0
203
-#define TCG_TARGET_HAS_nand_i64 0
204
-#define TCG_TARGET_HAS_add2_i64 0
205
-#define TCG_TARGET_HAS_sub2_i64 0
206
-#define TCG_TARGET_HAS_mulu2_i64 (!use_mips32r6_instructions)
207
-#define TCG_TARGET_HAS_muls2_i64 (!use_mips32r6_instructions)
208
-#define TCG_TARGET_HAS_muluh_i64 1
209
-#define TCG_TARGET_HAS_mulsh_i64 1
210
-#define TCG_TARGET_HAS_ext32s_i64 1
211
-#define TCG_TARGET_HAS_ext32u_i64 1
212
-#define TCG_TARGET_HAS_negsetcond_i64 0
213
-#endif
214
-
215
-/* optional instructions detected at runtime */
216
-#define TCG_TARGET_HAS_bswap16_i32 use_mips32r2_instructions
217
-#define TCG_TARGET_HAS_deposit_i32 use_mips32r2_instructions
218
-#define TCG_TARGET_HAS_extract_i32 use_mips32r2_instructions
219
-#define TCG_TARGET_HAS_sextract_i32 0
220
-#define TCG_TARGET_HAS_extract2_i32 0
221
-#define TCG_TARGET_HAS_ext8s_i32 use_mips32r2_instructions
222
-#define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions
223
-#define TCG_TARGET_HAS_rot_i32 use_mips32r2_instructions
224
-#define TCG_TARGET_HAS_clz_i32 use_mips32r2_instructions
225
-#define TCG_TARGET_HAS_ctz_i32 0
226
-#define TCG_TARGET_HAS_ctpop_i32 0
227
-#define TCG_TARGET_HAS_qemu_st8_i32 0
228
-
229
-#if TCG_TARGET_REG_BITS == 64
230
-#define TCG_TARGET_HAS_bswap16_i64 use_mips32r2_instructions
231
-#define TCG_TARGET_HAS_bswap32_i64 use_mips32r2_instructions
232
-#define TCG_TARGET_HAS_bswap64_i64 use_mips32r2_instructions
233
-#define TCG_TARGET_HAS_deposit_i64 use_mips32r2_instructions
234
-#define TCG_TARGET_HAS_extract_i64 use_mips32r2_instructions
235
-#define TCG_TARGET_HAS_sextract_i64 0
236
-#define TCG_TARGET_HAS_extract2_i64 0
237
-#define TCG_TARGET_HAS_ext8s_i64 use_mips32r2_instructions
238
-#define TCG_TARGET_HAS_ext16s_i64 use_mips32r2_instructions
239
-#define TCG_TARGET_HAS_rot_i64 use_mips32r2_instructions
240
-#define TCG_TARGET_HAS_clz_i64 use_mips32r2_instructions
241
-#define TCG_TARGET_HAS_ctz_i64 0
242
-#define TCG_TARGET_HAS_ctpop_i64 0
243
-#endif
244
-
245
-/* optional instructions automatically implemented */
246
-#define TCG_TARGET_HAS_ext8u_i32 0 /* andi rt, rs, 0xff */
247
-#define TCG_TARGET_HAS_ext16u_i32 0 /* andi rt, rs, 0xffff */
248
-
249
-#if TCG_TARGET_REG_BITS == 64
250
-#define TCG_TARGET_HAS_ext8u_i64 0 /* andi rt, rs, 0xff */
251
-#define TCG_TARGET_HAS_ext16u_i64 0 /* andi rt, rs, 0xffff */
252
-#endif
253
-
254
-#define TCG_TARGET_HAS_qemu_ldst_i128 0
255
-
256
-#define TCG_TARGET_HAS_tst 0
257
+#include "tcg-target-has.h"
258
259
#define TCG_TARGET_DEFAULT_MO 0
260
261
--
262
2.43.0
263
264
diff view generated by jsdifflib
New patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Message-ID: <20250108215156.8731-9-philmd@linaro.org>
4
---
5
tcg/ppc/tcg-target-has.h | 124 +++++++++++++++++++++++++++++++++++++++
6
tcg/ppc/tcg-target.h | 114 +----------------------------------
7
2 files changed, 125 insertions(+), 113 deletions(-)
8
create mode 100644 tcg/ppc/tcg-target-has.h
1
9
10
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
11
new file mode 100644
12
index XXXXXXX..XXXXXXX
13
--- /dev/null
14
+++ b/tcg/ppc/tcg-target-has.h
15
@@ -XXX,XX +XXX,XX @@
16
+/* SPDX-License-Identifier: MIT */
17
+/*
18
+ * Define target-specific opcode support
19
+ * Copyright (c) 2008 Fabrice Bellard
20
+ */
21
+
22
+#ifndef TCG_TARGET_HAS_H
23
+#define TCG_TARGET_HAS_H
24
+
25
+#include "host/cpuinfo.h"
26
+
27
+#define have_isa_2_06 (cpuinfo & CPUINFO_V2_06)
28
+#define have_isa_2_07 (cpuinfo & CPUINFO_V2_07)
29
+#define have_isa_3_00 (cpuinfo & CPUINFO_V3_0)
30
+#define have_isa_3_10 (cpuinfo & CPUINFO_V3_1)
31
+#define have_altivec (cpuinfo & CPUINFO_ALTIVEC)
32
+#define have_vsx (cpuinfo & CPUINFO_VSX)
33
+
34
+/* optional instructions automatically implemented */
35
+#define TCG_TARGET_HAS_ext8u_i32 0 /* andi */
36
+#define TCG_TARGET_HAS_ext16u_i32 0
37
+
38
+/* optional instructions */
39
+#define TCG_TARGET_HAS_div_i32 1
40
+#define TCG_TARGET_HAS_rem_i32 have_isa_3_00
41
+#define TCG_TARGET_HAS_rot_i32 1
42
+#define TCG_TARGET_HAS_ext8s_i32 1
43
+#define TCG_TARGET_HAS_ext16s_i32 1
44
+#define TCG_TARGET_HAS_bswap16_i32 1
45
+#define TCG_TARGET_HAS_bswap32_i32 1
46
+#define TCG_TARGET_HAS_not_i32 1
47
+#define TCG_TARGET_HAS_andc_i32 1
48
+#define TCG_TARGET_HAS_orc_i32 1
49
+#define TCG_TARGET_HAS_eqv_i32 1
50
+#define TCG_TARGET_HAS_nand_i32 1
51
+#define TCG_TARGET_HAS_nor_i32 1
52
+#define TCG_TARGET_HAS_clz_i32 1
53
+#define TCG_TARGET_HAS_ctz_i32 have_isa_3_00
54
+#define TCG_TARGET_HAS_ctpop_i32 have_isa_2_06
55
+#define TCG_TARGET_HAS_deposit_i32 1
56
+#define TCG_TARGET_HAS_extract_i32 1
57
+#define TCG_TARGET_HAS_sextract_i32 0
58
+#define TCG_TARGET_HAS_extract2_i32 0
59
+#define TCG_TARGET_HAS_negsetcond_i32 1
60
+#define TCG_TARGET_HAS_mulu2_i32 0
61
+#define TCG_TARGET_HAS_muls2_i32 0
62
+#define TCG_TARGET_HAS_muluh_i32 1
63
+#define TCG_TARGET_HAS_mulsh_i32 1
64
+#define TCG_TARGET_HAS_qemu_st8_i32 0
65
+
66
+#if TCG_TARGET_REG_BITS == 64
67
+#define TCG_TARGET_HAS_add2_i32 0
68
+#define TCG_TARGET_HAS_sub2_i32 0
69
+#define TCG_TARGET_HAS_extr_i64_i32 0
70
+#define TCG_TARGET_HAS_div_i64 1
71
+#define TCG_TARGET_HAS_rem_i64 have_isa_3_00
72
+#define TCG_TARGET_HAS_rot_i64 1
73
+#define TCG_TARGET_HAS_ext8s_i64 1
74
+#define TCG_TARGET_HAS_ext16s_i64 1
75
+#define TCG_TARGET_HAS_ext32s_i64 1
76
+#define TCG_TARGET_HAS_ext8u_i64 0
77
+#define TCG_TARGET_HAS_ext16u_i64 0
78
+#define TCG_TARGET_HAS_ext32u_i64 0
79
+#define TCG_TARGET_HAS_bswap16_i64 1
80
+#define TCG_TARGET_HAS_bswap32_i64 1
81
+#define TCG_TARGET_HAS_bswap64_i64 1
82
+#define TCG_TARGET_HAS_not_i64 1
83
+#define TCG_TARGET_HAS_andc_i64 1
84
+#define TCG_TARGET_HAS_orc_i64 1
85
+#define TCG_TARGET_HAS_eqv_i64 1
86
+#define TCG_TARGET_HAS_nand_i64 1
87
+#define TCG_TARGET_HAS_nor_i64 1
88
+#define TCG_TARGET_HAS_clz_i64 1
89
+#define TCG_TARGET_HAS_ctz_i64 have_isa_3_00
90
+#define TCG_TARGET_HAS_ctpop_i64 have_isa_2_06
91
+#define TCG_TARGET_HAS_deposit_i64 1
92
+#define TCG_TARGET_HAS_extract_i64 1
93
+#define TCG_TARGET_HAS_sextract_i64 0
94
+#define TCG_TARGET_HAS_extract2_i64 0
95
+#define TCG_TARGET_HAS_negsetcond_i64 1
96
+#define TCG_TARGET_HAS_add2_i64 1
97
+#define TCG_TARGET_HAS_sub2_i64 1
98
+#define TCG_TARGET_HAS_mulu2_i64 0
99
+#define TCG_TARGET_HAS_muls2_i64 0
100
+#define TCG_TARGET_HAS_muluh_i64 1
101
+#define TCG_TARGET_HAS_mulsh_i64 1
102
+#endif
103
+
104
+#define TCG_TARGET_HAS_qemu_ldst_i128 \
105
+ (TCG_TARGET_REG_BITS == 64 && have_isa_2_07)
106
+
107
+#define TCG_TARGET_HAS_tst 1
108
+
109
+/*
110
+ * While technically Altivec could support V64, it has no 64-bit store
111
+ * instruction and substituting two 32-bit stores makes the generated
112
+ * code quite large.
113
+ */
114
+#define TCG_TARGET_HAS_v64 have_vsx
115
+#define TCG_TARGET_HAS_v128 have_altivec
116
+#define TCG_TARGET_HAS_v256 0
117
+
118
+#define TCG_TARGET_HAS_andc_vec 1
119
+#define TCG_TARGET_HAS_orc_vec have_isa_2_07
120
+#define TCG_TARGET_HAS_nand_vec have_isa_2_07
121
+#define TCG_TARGET_HAS_nor_vec 1
122
+#define TCG_TARGET_HAS_eqv_vec have_isa_2_07
123
+#define TCG_TARGET_HAS_not_vec 1
124
+#define TCG_TARGET_HAS_neg_vec have_isa_3_00
125
+#define TCG_TARGET_HAS_abs_vec 0
126
+#define TCG_TARGET_HAS_roti_vec 0
127
+#define TCG_TARGET_HAS_rots_vec 0
128
+#define TCG_TARGET_HAS_rotv_vec 1
129
+#define TCG_TARGET_HAS_shi_vec 0
130
+#define TCG_TARGET_HAS_shs_vec 0
131
+#define TCG_TARGET_HAS_shv_vec 1
132
+#define TCG_TARGET_HAS_mul_vec 1
133
+#define TCG_TARGET_HAS_sat_vec 1
134
+#define TCG_TARGET_HAS_minmax_vec 1
135
+#define TCG_TARGET_HAS_bitsel_vec have_vsx
136
+#define TCG_TARGET_HAS_cmpsel_vec 1
137
+#define TCG_TARGET_HAS_tst_vec 0
138
+
139
+#endif
140
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
141
index XXXXXXX..XXXXXXX 100644
142
--- a/tcg/ppc/tcg-target.h
143
+++ b/tcg/ppc/tcg-target.h
144
@@ -XXX,XX +XXX,XX @@
145
#ifndef PPC_TCG_TARGET_H
146
#define PPC_TCG_TARGET_H
147
148
-#include "host/cpuinfo.h"
149
-
150
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
151
152
#define TCG_TARGET_NB_REGS 64
153
@@ -XXX,XX +XXX,XX @@ typedef enum {
154
TCG_AREG0 = TCG_REG_R27
155
} TCGReg;
156
157
-#define have_isa_2_06 (cpuinfo & CPUINFO_V2_06)
158
-#define have_isa_2_07 (cpuinfo & CPUINFO_V2_07)
159
-#define have_isa_3_00 (cpuinfo & CPUINFO_V3_0)
160
-#define have_isa_3_10 (cpuinfo & CPUINFO_V3_1)
161
-#define have_altivec (cpuinfo & CPUINFO_ALTIVEC)
162
-#define have_vsx (cpuinfo & CPUINFO_VSX)
163
-
164
-/* optional instructions automatically implemented */
165
-#define TCG_TARGET_HAS_ext8u_i32 0 /* andi */
166
-#define TCG_TARGET_HAS_ext16u_i32 0
167
-
168
-/* optional instructions */
169
-#define TCG_TARGET_HAS_div_i32 1
170
-#define TCG_TARGET_HAS_rem_i32 have_isa_3_00
171
-#define TCG_TARGET_HAS_rot_i32 1
172
-#define TCG_TARGET_HAS_ext8s_i32 1
173
-#define TCG_TARGET_HAS_ext16s_i32 1
174
-#define TCG_TARGET_HAS_bswap16_i32 1
175
-#define TCG_TARGET_HAS_bswap32_i32 1
176
-#define TCG_TARGET_HAS_not_i32 1
177
-#define TCG_TARGET_HAS_andc_i32 1
178
-#define TCG_TARGET_HAS_orc_i32 1
179
-#define TCG_TARGET_HAS_eqv_i32 1
180
-#define TCG_TARGET_HAS_nand_i32 1
181
-#define TCG_TARGET_HAS_nor_i32 1
182
-#define TCG_TARGET_HAS_clz_i32 1
183
-#define TCG_TARGET_HAS_ctz_i32 have_isa_3_00
184
-#define TCG_TARGET_HAS_ctpop_i32 have_isa_2_06
185
-#define TCG_TARGET_HAS_deposit_i32 1
186
-#define TCG_TARGET_HAS_extract_i32 1
187
-#define TCG_TARGET_HAS_sextract_i32 0
188
-#define TCG_TARGET_HAS_extract2_i32 0
189
-#define TCG_TARGET_HAS_negsetcond_i32 1
190
-#define TCG_TARGET_HAS_mulu2_i32 0
191
-#define TCG_TARGET_HAS_muls2_i32 0
192
-#define TCG_TARGET_HAS_muluh_i32 1
193
-#define TCG_TARGET_HAS_mulsh_i32 1
194
-#define TCG_TARGET_HAS_qemu_st8_i32 0
195
-
196
-#if TCG_TARGET_REG_BITS == 64
197
-#define TCG_TARGET_HAS_add2_i32 0
198
-#define TCG_TARGET_HAS_sub2_i32 0
199
-#define TCG_TARGET_HAS_extr_i64_i32 0
200
-#define TCG_TARGET_HAS_div_i64 1
201
-#define TCG_TARGET_HAS_rem_i64 have_isa_3_00
202
-#define TCG_TARGET_HAS_rot_i64 1
203
-#define TCG_TARGET_HAS_ext8s_i64 1
204
-#define TCG_TARGET_HAS_ext16s_i64 1
205
-#define TCG_TARGET_HAS_ext32s_i64 1
206
-#define TCG_TARGET_HAS_ext8u_i64 0
207
-#define TCG_TARGET_HAS_ext16u_i64 0
208
-#define TCG_TARGET_HAS_ext32u_i64 0
209
-#define TCG_TARGET_HAS_bswap16_i64 1
210
-#define TCG_TARGET_HAS_bswap32_i64 1
211
-#define TCG_TARGET_HAS_bswap64_i64 1
212
-#define TCG_TARGET_HAS_not_i64 1
213
-#define TCG_TARGET_HAS_andc_i64 1
214
-#define TCG_TARGET_HAS_orc_i64 1
215
-#define TCG_TARGET_HAS_eqv_i64 1
216
-#define TCG_TARGET_HAS_nand_i64 1
217
-#define TCG_TARGET_HAS_nor_i64 1
218
-#define TCG_TARGET_HAS_clz_i64 1
219
-#define TCG_TARGET_HAS_ctz_i64 have_isa_3_00
220
-#define TCG_TARGET_HAS_ctpop_i64 have_isa_2_06
221
-#define TCG_TARGET_HAS_deposit_i64 1
222
-#define TCG_TARGET_HAS_extract_i64 1
223
-#define TCG_TARGET_HAS_sextract_i64 0
224
-#define TCG_TARGET_HAS_extract2_i64 0
225
-#define TCG_TARGET_HAS_negsetcond_i64 1
226
-#define TCG_TARGET_HAS_add2_i64 1
227
-#define TCG_TARGET_HAS_sub2_i64 1
228
-#define TCG_TARGET_HAS_mulu2_i64 0
229
-#define TCG_TARGET_HAS_muls2_i64 0
230
-#define TCG_TARGET_HAS_muluh_i64 1
231
-#define TCG_TARGET_HAS_mulsh_i64 1
232
-#endif
233
-
234
-#define TCG_TARGET_HAS_qemu_ldst_i128 \
235
- (TCG_TARGET_REG_BITS == 64 && have_isa_2_07)
236
-
237
-#define TCG_TARGET_HAS_tst 1
238
-
239
-/*
240
- * While technically Altivec could support V64, it has no 64-bit store
241
- * instruction and substituting two 32-bit stores makes the generated
242
- * code quite large.
243
- */
244
-#define TCG_TARGET_HAS_v64 have_vsx
245
-#define TCG_TARGET_HAS_v128 have_altivec
246
-#define TCG_TARGET_HAS_v256 0
247
-
248
-#define TCG_TARGET_HAS_andc_vec 1
249
-#define TCG_TARGET_HAS_orc_vec have_isa_2_07
250
-#define TCG_TARGET_HAS_nand_vec have_isa_2_07
251
-#define TCG_TARGET_HAS_nor_vec 1
252
-#define TCG_TARGET_HAS_eqv_vec have_isa_2_07
253
-#define TCG_TARGET_HAS_not_vec 1
254
-#define TCG_TARGET_HAS_neg_vec have_isa_3_00
255
-#define TCG_TARGET_HAS_abs_vec 0
256
-#define TCG_TARGET_HAS_roti_vec 0
257
-#define TCG_TARGET_HAS_rots_vec 0
258
-#define TCG_TARGET_HAS_rotv_vec 1
259
-#define TCG_TARGET_HAS_shi_vec 0
260
-#define TCG_TARGET_HAS_shs_vec 0
261
-#define TCG_TARGET_HAS_shv_vec 1
262
-#define TCG_TARGET_HAS_mul_vec 1
263
-#define TCG_TARGET_HAS_sat_vec 1
264
-#define TCG_TARGET_HAS_minmax_vec 1
265
-#define TCG_TARGET_HAS_bitsel_vec have_vsx
266
-#define TCG_TARGET_HAS_cmpsel_vec 1
267
-#define TCG_TARGET_HAS_tst_vec 0
268
+#include "tcg-target-has.h"
269
270
#define TCG_TARGET_DEFAULT_MO (0)
271
272
--
273
2.43.0
274
275
diff view generated by jsdifflib
New patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Message-ID: <20250108215156.8731-10-philmd@linaro.org>
4
---
5
tcg/riscv/tcg-target-has.h | 112 +++++++++++++++++++++++++++++++++++++
6
tcg/riscv/tcg-target.h | 102 +--------------------------------
7
2 files changed, 113 insertions(+), 101 deletions(-)
8
create mode 100644 tcg/riscv/tcg-target-has.h
1
9
10
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
11
new file mode 100644
12
index XXXXXXX..XXXXXXX
13
--- /dev/null
14
+++ b/tcg/riscv/tcg-target-has.h
15
@@ -XXX,XX +XXX,XX @@
16
+/* SPDX-License-Identifier: MIT */
17
+/*
18
+ * Define target-specific opcode support
19
+ * Copyright (c) 2018 SiFive, Inc
20
+ */
21
+
22
+#ifndef TCG_TARGET_HAS_H
23
+#define TCG_TARGET_HAS_H
24
+
25
+#include "host/cpuinfo.h"
26
+
27
+/* optional instructions */
28
+#define TCG_TARGET_HAS_negsetcond_i32 1
29
+#define TCG_TARGET_HAS_div_i32 1
30
+#define TCG_TARGET_HAS_rem_i32 1
31
+#define TCG_TARGET_HAS_div2_i32 0
32
+#define TCG_TARGET_HAS_rot_i32 (cpuinfo & CPUINFO_ZBB)
33
+#define TCG_TARGET_HAS_deposit_i32 0
34
+#define TCG_TARGET_HAS_extract_i32 0
35
+#define TCG_TARGET_HAS_sextract_i32 0
36
+#define TCG_TARGET_HAS_extract2_i32 0
37
+#define TCG_TARGET_HAS_add2_i32 1
38
+#define TCG_TARGET_HAS_sub2_i32 1
39
+#define TCG_TARGET_HAS_mulu2_i32 0
40
+#define TCG_TARGET_HAS_muls2_i32 0
41
+#define TCG_TARGET_HAS_muluh_i32 0
42
+#define TCG_TARGET_HAS_mulsh_i32 0
43
+#define TCG_TARGET_HAS_ext8s_i32 1
44
+#define TCG_TARGET_HAS_ext16s_i32 1
45
+#define TCG_TARGET_HAS_ext8u_i32 1
46
+#define TCG_TARGET_HAS_ext16u_i32 1
47
+#define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
48
+#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
49
+#define TCG_TARGET_HAS_not_i32 1
50
+#define TCG_TARGET_HAS_andc_i32 (cpuinfo & CPUINFO_ZBB)
51
+#define TCG_TARGET_HAS_orc_i32 (cpuinfo & CPUINFO_ZBB)
52
+#define TCG_TARGET_HAS_eqv_i32 (cpuinfo & CPUINFO_ZBB)
53
+#define TCG_TARGET_HAS_nand_i32 0
54
+#define TCG_TARGET_HAS_nor_i32 0
55
+#define TCG_TARGET_HAS_clz_i32 (cpuinfo & CPUINFO_ZBB)
56
+#define TCG_TARGET_HAS_ctz_i32 (cpuinfo & CPUINFO_ZBB)
57
+#define TCG_TARGET_HAS_ctpop_i32 (cpuinfo & CPUINFO_ZBB)
58
+#define TCG_TARGET_HAS_brcond2 1
59
+#define TCG_TARGET_HAS_setcond2 1
60
+#define TCG_TARGET_HAS_qemu_st8_i32 0
61
+
62
+#define TCG_TARGET_HAS_negsetcond_i64 1
63
+#define TCG_TARGET_HAS_div_i64 1
64
+#define TCG_TARGET_HAS_rem_i64 1
65
+#define TCG_TARGET_HAS_div2_i64 0
66
+#define TCG_TARGET_HAS_rot_i64 (cpuinfo & CPUINFO_ZBB)
67
+#define TCG_TARGET_HAS_deposit_i64 0
68
+#define TCG_TARGET_HAS_extract_i64 0
69
+#define TCG_TARGET_HAS_sextract_i64 0
70
+#define TCG_TARGET_HAS_extract2_i64 0
71
+#define TCG_TARGET_HAS_extr_i64_i32 1
72
+#define TCG_TARGET_HAS_ext8s_i64 1
73
+#define TCG_TARGET_HAS_ext16s_i64 1
74
+#define TCG_TARGET_HAS_ext32s_i64 1
75
+#define TCG_TARGET_HAS_ext8u_i64 1
76
+#define TCG_TARGET_HAS_ext16u_i64 1
77
+#define TCG_TARGET_HAS_ext32u_i64 1
78
+#define TCG_TARGET_HAS_bswap16_i64 (cpuinfo & CPUINFO_ZBB)
79
+#define TCG_TARGET_HAS_bswap32_i64 (cpuinfo & CPUINFO_ZBB)
80
+#define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
81
+#define TCG_TARGET_HAS_not_i64 1
82
+#define TCG_TARGET_HAS_andc_i64 (cpuinfo & CPUINFO_ZBB)
83
+#define TCG_TARGET_HAS_orc_i64 (cpuinfo & CPUINFO_ZBB)
84
+#define TCG_TARGET_HAS_eqv_i64 (cpuinfo & CPUINFO_ZBB)
85
+#define TCG_TARGET_HAS_nand_i64 0
86
+#define TCG_TARGET_HAS_nor_i64 0
87
+#define TCG_TARGET_HAS_clz_i64 (cpuinfo & CPUINFO_ZBB)
88
+#define TCG_TARGET_HAS_ctz_i64 (cpuinfo & CPUINFO_ZBB)
89
+#define TCG_TARGET_HAS_ctpop_i64 (cpuinfo & CPUINFO_ZBB)
90
+#define TCG_TARGET_HAS_add2_i64 1
91
+#define TCG_TARGET_HAS_sub2_i64 1
92
+#define TCG_TARGET_HAS_mulu2_i64 0
93
+#define TCG_TARGET_HAS_muls2_i64 0
94
+#define TCG_TARGET_HAS_muluh_i64 1
95
+#define TCG_TARGET_HAS_mulsh_i64 1
96
+
97
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
98
+
99
+#define TCG_TARGET_HAS_tst 0
100
+
101
+/* vector instructions */
102
+#define TCG_TARGET_HAS_v64 (cpuinfo & CPUINFO_ZVE64X)
103
+#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_ZVE64X)
104
+#define TCG_TARGET_HAS_v256 (cpuinfo & CPUINFO_ZVE64X)
105
+#define TCG_TARGET_HAS_andc_vec 0
106
+#define TCG_TARGET_HAS_orc_vec 0
107
+#define TCG_TARGET_HAS_nand_vec 0
108
+#define TCG_TARGET_HAS_nor_vec 0
109
+#define TCG_TARGET_HAS_eqv_vec 0
110
+#define TCG_TARGET_HAS_not_vec 1
111
+#define TCG_TARGET_HAS_neg_vec 1
112
+#define TCG_TARGET_HAS_abs_vec 0
113
+#define TCG_TARGET_HAS_roti_vec 1
114
+#define TCG_TARGET_HAS_rots_vec 1
115
+#define TCG_TARGET_HAS_rotv_vec 1
116
+#define TCG_TARGET_HAS_shi_vec 1
117
+#define TCG_TARGET_HAS_shs_vec 1
118
+#define TCG_TARGET_HAS_shv_vec 1
119
+#define TCG_TARGET_HAS_mul_vec 1
120
+#define TCG_TARGET_HAS_sat_vec 1
121
+#define TCG_TARGET_HAS_minmax_vec 1
122
+#define TCG_TARGET_HAS_bitsel_vec 0
123
+#define TCG_TARGET_HAS_cmpsel_vec 1
124
+
125
+#define TCG_TARGET_HAS_tst_vec 0
126
+
127
+#endif
128
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
129
index XXXXXXX..XXXXXXX 100644
130
--- a/tcg/riscv/tcg-target.h
131
+++ b/tcg/riscv/tcg-target.h
132
@@ -XXX,XX +XXX,XX @@
133
#ifndef RISCV_TCG_TARGET_H
134
#define RISCV_TCG_TARGET_H
135
136
-#include "host/cpuinfo.h"
137
-
138
#define TCG_TARGET_INSN_UNIT_SIZE 4
139
#define TCG_TARGET_NB_REGS 64
140
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
141
@@ -XXX,XX +XXX,XX @@ typedef enum {
142
TCG_REG_TMP2 = TCG_REG_T4,
143
} TCGReg;
144
145
-/* optional instructions */
146
-#define TCG_TARGET_HAS_negsetcond_i32 1
147
-#define TCG_TARGET_HAS_div_i32 1
148
-#define TCG_TARGET_HAS_rem_i32 1
149
-#define TCG_TARGET_HAS_div2_i32 0
150
-#define TCG_TARGET_HAS_rot_i32 (cpuinfo & CPUINFO_ZBB)
151
-#define TCG_TARGET_HAS_deposit_i32 0
152
-#define TCG_TARGET_HAS_extract_i32 0
153
-#define TCG_TARGET_HAS_sextract_i32 0
154
-#define TCG_TARGET_HAS_extract2_i32 0
155
-#define TCG_TARGET_HAS_add2_i32 1
156
-#define TCG_TARGET_HAS_sub2_i32 1
157
-#define TCG_TARGET_HAS_mulu2_i32 0
158
-#define TCG_TARGET_HAS_muls2_i32 0
159
-#define TCG_TARGET_HAS_muluh_i32 0
160
-#define TCG_TARGET_HAS_mulsh_i32 0
161
-#define TCG_TARGET_HAS_ext8s_i32 1
162
-#define TCG_TARGET_HAS_ext16s_i32 1
163
-#define TCG_TARGET_HAS_ext8u_i32 1
164
-#define TCG_TARGET_HAS_ext16u_i32 1
165
-#define TCG_TARGET_HAS_bswap16_i32 (cpuinfo & CPUINFO_ZBB)
166
-#define TCG_TARGET_HAS_bswap32_i32 (cpuinfo & CPUINFO_ZBB)
167
-#define TCG_TARGET_HAS_not_i32 1
168
-#define TCG_TARGET_HAS_andc_i32 (cpuinfo & CPUINFO_ZBB)
169
-#define TCG_TARGET_HAS_orc_i32 (cpuinfo & CPUINFO_ZBB)
170
-#define TCG_TARGET_HAS_eqv_i32 (cpuinfo & CPUINFO_ZBB)
171
-#define TCG_TARGET_HAS_nand_i32 0
172
-#define TCG_TARGET_HAS_nor_i32 0
173
-#define TCG_TARGET_HAS_clz_i32 (cpuinfo & CPUINFO_ZBB)
174
-#define TCG_TARGET_HAS_ctz_i32 (cpuinfo & CPUINFO_ZBB)
175
-#define TCG_TARGET_HAS_ctpop_i32 (cpuinfo & CPUINFO_ZBB)
176
-#define TCG_TARGET_HAS_brcond2 1
177
-#define TCG_TARGET_HAS_setcond2 1
178
-#define TCG_TARGET_HAS_qemu_st8_i32 0
179
-
180
-#define TCG_TARGET_HAS_negsetcond_i64 1
181
-#define TCG_TARGET_HAS_div_i64 1
182
-#define TCG_TARGET_HAS_rem_i64 1
183
-#define TCG_TARGET_HAS_div2_i64 0
184
-#define TCG_TARGET_HAS_rot_i64 (cpuinfo & CPUINFO_ZBB)
185
-#define TCG_TARGET_HAS_deposit_i64 0
186
-#define TCG_TARGET_HAS_extract_i64 0
187
-#define TCG_TARGET_HAS_sextract_i64 0
188
-#define TCG_TARGET_HAS_extract2_i64 0
189
-#define TCG_TARGET_HAS_extr_i64_i32 1
190
-#define TCG_TARGET_HAS_ext8s_i64 1
191
-#define TCG_TARGET_HAS_ext16s_i64 1
192
-#define TCG_TARGET_HAS_ext32s_i64 1
193
-#define TCG_TARGET_HAS_ext8u_i64 1
194
-#define TCG_TARGET_HAS_ext16u_i64 1
195
-#define TCG_TARGET_HAS_ext32u_i64 1
196
-#define TCG_TARGET_HAS_bswap16_i64 (cpuinfo & CPUINFO_ZBB)
197
-#define TCG_TARGET_HAS_bswap32_i64 (cpuinfo & CPUINFO_ZBB)
198
-#define TCG_TARGET_HAS_bswap64_i64 (cpuinfo & CPUINFO_ZBB)
199
-#define TCG_TARGET_HAS_not_i64 1
200
-#define TCG_TARGET_HAS_andc_i64 (cpuinfo & CPUINFO_ZBB)
201
-#define TCG_TARGET_HAS_orc_i64 (cpuinfo & CPUINFO_ZBB)
202
-#define TCG_TARGET_HAS_eqv_i64 (cpuinfo & CPUINFO_ZBB)
203
-#define TCG_TARGET_HAS_nand_i64 0
204
-#define TCG_TARGET_HAS_nor_i64 0
205
-#define TCG_TARGET_HAS_clz_i64 (cpuinfo & CPUINFO_ZBB)
206
-#define TCG_TARGET_HAS_ctz_i64 (cpuinfo & CPUINFO_ZBB)
207
-#define TCG_TARGET_HAS_ctpop_i64 (cpuinfo & CPUINFO_ZBB)
208
-#define TCG_TARGET_HAS_add2_i64 1
209
-#define TCG_TARGET_HAS_sub2_i64 1
210
-#define TCG_TARGET_HAS_mulu2_i64 0
211
-#define TCG_TARGET_HAS_muls2_i64 0
212
-#define TCG_TARGET_HAS_muluh_i64 1
213
-#define TCG_TARGET_HAS_mulsh_i64 1
214
-
215
-#define TCG_TARGET_HAS_qemu_ldst_i128 0
216
-
217
-#define TCG_TARGET_HAS_tst 0
218
-
219
-/* vector instructions */
220
-#define TCG_TARGET_HAS_v64 (cpuinfo & CPUINFO_ZVE64X)
221
-#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_ZVE64X)
222
-#define TCG_TARGET_HAS_v256 (cpuinfo & CPUINFO_ZVE64X)
223
-#define TCG_TARGET_HAS_andc_vec 0
224
-#define TCG_TARGET_HAS_orc_vec 0
225
-#define TCG_TARGET_HAS_nand_vec 0
226
-#define TCG_TARGET_HAS_nor_vec 0
227
-#define TCG_TARGET_HAS_eqv_vec 0
228
-#define TCG_TARGET_HAS_not_vec 1
229
-#define TCG_TARGET_HAS_neg_vec 1
230
-#define TCG_TARGET_HAS_abs_vec 0
231
-#define TCG_TARGET_HAS_roti_vec 1
232
-#define TCG_TARGET_HAS_rots_vec 1
233
-#define TCG_TARGET_HAS_rotv_vec 1
234
-#define TCG_TARGET_HAS_shi_vec 1
235
-#define TCG_TARGET_HAS_shs_vec 1
236
-#define TCG_TARGET_HAS_shv_vec 1
237
-#define TCG_TARGET_HAS_mul_vec 1
238
-#define TCG_TARGET_HAS_sat_vec 1
239
-#define TCG_TARGET_HAS_minmax_vec 1
240
-#define TCG_TARGET_HAS_bitsel_vec 0
241
-#define TCG_TARGET_HAS_cmpsel_vec 1
242
-
243
-#define TCG_TARGET_HAS_tst_vec 0
244
+#include "tcg-target-has.h"
245
246
#define TCG_TARGET_DEFAULT_MO (0)
247
248
--
249
2.43.0
250
251
diff view generated by jsdifflib
New patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Message-ID: <20250108215156.8731-11-philmd@linaro.org>
4
---
5
tcg/s390x/tcg-target-has.h | 124 +++++++++++++++++++++++++++++++++++++
6
tcg/s390x/tcg-target.h | 114 +---------------------------------
7
2 files changed, 125 insertions(+), 113 deletions(-)
8
create mode 100644 tcg/s390x/tcg-target-has.h
1
9
10
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
11
new file mode 100644
12
index XXXXXXX..XXXXXXX
13
--- /dev/null
14
+++ b/tcg/s390x/tcg-target-has.h
15
@@ -XXX,XX +XXX,XX @@
16
+/* SPDX-License-Identifier: MIT */
17
+/*
18
+ * Define target-specific opcode support
19
+ * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
20
+ */
21
+
22
+#ifndef TCG_TARGET_HAS_H
23
+#define TCG_TARGET_HAS_H
24
+
25
+/* Facilities required for proper operation; checked at startup. */
26
+
27
+#define FACILITY_ZARCH_ACTIVE 2
28
+#define FACILITY_LONG_DISP 18
29
+#define FACILITY_EXT_IMM 21
30
+#define FACILITY_GEN_INST_EXT 34
31
+#define FACILITY_45 45
32
+
33
+/* Facilities that are checked at runtime. */
34
+
35
+#define FACILITY_LOAD_ON_COND2 53
36
+#define FACILITY_MISC_INSN_EXT2 58
37
+#define FACILITY_MISC_INSN_EXT3 61
38
+#define FACILITY_VECTOR 129
39
+#define FACILITY_VECTOR_ENH1 135
40
+
41
+extern uint64_t s390_facilities[3];
42
+
43
+#define HAVE_FACILITY(X) \
44
+ ((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1)
45
+
46
+/* optional instructions */
47
+#define TCG_TARGET_HAS_div2_i32 1
48
+#define TCG_TARGET_HAS_rot_i32 1
49
+#define TCG_TARGET_HAS_ext8s_i32 1
50
+#define TCG_TARGET_HAS_ext16s_i32 1
51
+#define TCG_TARGET_HAS_ext8u_i32 1
52
+#define TCG_TARGET_HAS_ext16u_i32 1
53
+#define TCG_TARGET_HAS_bswap16_i32 1
54
+#define TCG_TARGET_HAS_bswap32_i32 1
55
+#define TCG_TARGET_HAS_not_i32 HAVE_FACILITY(MISC_INSN_EXT3)
56
+#define TCG_TARGET_HAS_andc_i32 HAVE_FACILITY(MISC_INSN_EXT3)
57
+#define TCG_TARGET_HAS_orc_i32 HAVE_FACILITY(MISC_INSN_EXT3)
58
+#define TCG_TARGET_HAS_eqv_i32 HAVE_FACILITY(MISC_INSN_EXT3)
59
+#define TCG_TARGET_HAS_nand_i32 HAVE_FACILITY(MISC_INSN_EXT3)
60
+#define TCG_TARGET_HAS_nor_i32 HAVE_FACILITY(MISC_INSN_EXT3)
61
+#define TCG_TARGET_HAS_clz_i32 0
62
+#define TCG_TARGET_HAS_ctz_i32 0
63
+#define TCG_TARGET_HAS_ctpop_i32 1
64
+#define TCG_TARGET_HAS_deposit_i32 1
65
+#define TCG_TARGET_HAS_extract_i32 1
66
+#define TCG_TARGET_HAS_sextract_i32 0
67
+#define TCG_TARGET_HAS_extract2_i32 0
68
+#define TCG_TARGET_HAS_negsetcond_i32 1
69
+#define TCG_TARGET_HAS_add2_i32 1
70
+#define TCG_TARGET_HAS_sub2_i32 1
71
+#define TCG_TARGET_HAS_mulu2_i32 0
72
+#define TCG_TARGET_HAS_muls2_i32 0
73
+#define TCG_TARGET_HAS_muluh_i32 0
74
+#define TCG_TARGET_HAS_mulsh_i32 0
75
+#define TCG_TARGET_HAS_extr_i64_i32 0
76
+#define TCG_TARGET_HAS_qemu_st8_i32 0
77
+
78
+#define TCG_TARGET_HAS_div2_i64 1
79
+#define TCG_TARGET_HAS_rot_i64 1
80
+#define TCG_TARGET_HAS_ext8s_i64 1
81
+#define TCG_TARGET_HAS_ext16s_i64 1
82
+#define TCG_TARGET_HAS_ext32s_i64 1
83
+#define TCG_TARGET_HAS_ext8u_i64 1
84
+#define TCG_TARGET_HAS_ext16u_i64 1
85
+#define TCG_TARGET_HAS_ext32u_i64 1
86
+#define TCG_TARGET_HAS_bswap16_i64 1
87
+#define TCG_TARGET_HAS_bswap32_i64 1
88
+#define TCG_TARGET_HAS_bswap64_i64 1
89
+#define TCG_TARGET_HAS_not_i64 HAVE_FACILITY(MISC_INSN_EXT3)
90
+#define TCG_TARGET_HAS_andc_i64 HAVE_FACILITY(MISC_INSN_EXT3)
91
+#define TCG_TARGET_HAS_orc_i64 HAVE_FACILITY(MISC_INSN_EXT3)
92
+#define TCG_TARGET_HAS_eqv_i64 HAVE_FACILITY(MISC_INSN_EXT3)
93
+#define TCG_TARGET_HAS_nand_i64 HAVE_FACILITY(MISC_INSN_EXT3)
94
+#define TCG_TARGET_HAS_nor_i64 HAVE_FACILITY(MISC_INSN_EXT3)
95
+#define TCG_TARGET_HAS_clz_i64 1
96
+#define TCG_TARGET_HAS_ctz_i64 0
97
+#define TCG_TARGET_HAS_ctpop_i64 1
98
+#define TCG_TARGET_HAS_deposit_i64 1
99
+#define TCG_TARGET_HAS_extract_i64 1
100
+#define TCG_TARGET_HAS_sextract_i64 0
101
+#define TCG_TARGET_HAS_extract2_i64 0
102
+#define TCG_TARGET_HAS_negsetcond_i64 1
103
+#define TCG_TARGET_HAS_add2_i64 1
104
+#define TCG_TARGET_HAS_sub2_i64 1
105
+#define TCG_TARGET_HAS_mulu2_i64 1
106
+#define TCG_TARGET_HAS_muls2_i64 HAVE_FACILITY(MISC_INSN_EXT2)
107
+#define TCG_TARGET_HAS_muluh_i64 0
108
+#define TCG_TARGET_HAS_mulsh_i64 0
109
+
110
+#define TCG_TARGET_HAS_qemu_ldst_i128 1
111
+
112
+#define TCG_TARGET_HAS_tst 1
113
+
114
+#define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR)
115
+#define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR)
116
+#define TCG_TARGET_HAS_v256 0
117
+
118
+#define TCG_TARGET_HAS_andc_vec 1
119
+#define TCG_TARGET_HAS_orc_vec HAVE_FACILITY(VECTOR_ENH1)
120
+#define TCG_TARGET_HAS_nand_vec HAVE_FACILITY(VECTOR_ENH1)
121
+#define TCG_TARGET_HAS_nor_vec 1
122
+#define TCG_TARGET_HAS_eqv_vec HAVE_FACILITY(VECTOR_ENH1)
123
+#define TCG_TARGET_HAS_not_vec 1
124
+#define TCG_TARGET_HAS_neg_vec 1
125
+#define TCG_TARGET_HAS_abs_vec 1
126
+#define TCG_TARGET_HAS_roti_vec 1
127
+#define TCG_TARGET_HAS_rots_vec 1
128
+#define TCG_TARGET_HAS_rotv_vec 1
129
+#define TCG_TARGET_HAS_shi_vec 1
130
+#define TCG_TARGET_HAS_shs_vec 1
131
+#define TCG_TARGET_HAS_shv_vec 1
132
+#define TCG_TARGET_HAS_mul_vec 1
133
+#define TCG_TARGET_HAS_sat_vec 0
134
+#define TCG_TARGET_HAS_minmax_vec 1
135
+#define TCG_TARGET_HAS_bitsel_vec 1
136
+#define TCG_TARGET_HAS_cmpsel_vec 1
137
+#define TCG_TARGET_HAS_tst_vec 0
138
+
139
+#endif
140
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
141
index XXXXXXX..XXXXXXX 100644
142
--- a/tcg/s390x/tcg-target.h
143
+++ b/tcg/s390x/tcg-target.h
144
@@ -XXX,XX +XXX,XX @@ typedef enum TCGReg {
145
146
#define TCG_TARGET_NB_REGS 64
147
148
-/* Facilities required for proper operation; checked at startup. */
149
-
150
-#define FACILITY_ZARCH_ACTIVE 2
151
-#define FACILITY_LONG_DISP 18
152
-#define FACILITY_EXT_IMM 21
153
-#define FACILITY_GEN_INST_EXT 34
154
-#define FACILITY_45 45
155
-
156
-/* Facilities that are checked at runtime. */
157
-
158
-#define FACILITY_LOAD_ON_COND2 53
159
-#define FACILITY_MISC_INSN_EXT2 58
160
-#define FACILITY_MISC_INSN_EXT3 61
161
-#define FACILITY_VECTOR 129
162
-#define FACILITY_VECTOR_ENH1 135
163
-
164
-extern uint64_t s390_facilities[3];
165
-
166
-#define HAVE_FACILITY(X) \
167
- ((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1)
168
-
169
-/* optional instructions */
170
-#define TCG_TARGET_HAS_div2_i32 1
171
-#define TCG_TARGET_HAS_rot_i32 1
172
-#define TCG_TARGET_HAS_ext8s_i32 1
173
-#define TCG_TARGET_HAS_ext16s_i32 1
174
-#define TCG_TARGET_HAS_ext8u_i32 1
175
-#define TCG_TARGET_HAS_ext16u_i32 1
176
-#define TCG_TARGET_HAS_bswap16_i32 1
177
-#define TCG_TARGET_HAS_bswap32_i32 1
178
-#define TCG_TARGET_HAS_not_i32 HAVE_FACILITY(MISC_INSN_EXT3)
179
-#define TCG_TARGET_HAS_andc_i32 HAVE_FACILITY(MISC_INSN_EXT3)
180
-#define TCG_TARGET_HAS_orc_i32 HAVE_FACILITY(MISC_INSN_EXT3)
181
-#define TCG_TARGET_HAS_eqv_i32 HAVE_FACILITY(MISC_INSN_EXT3)
182
-#define TCG_TARGET_HAS_nand_i32 HAVE_FACILITY(MISC_INSN_EXT3)
183
-#define TCG_TARGET_HAS_nor_i32 HAVE_FACILITY(MISC_INSN_EXT3)
184
-#define TCG_TARGET_HAS_clz_i32 0
185
-#define TCG_TARGET_HAS_ctz_i32 0
186
-#define TCG_TARGET_HAS_ctpop_i32 1
187
-#define TCG_TARGET_HAS_deposit_i32 1
188
-#define TCG_TARGET_HAS_extract_i32 1
189
-#define TCG_TARGET_HAS_sextract_i32 0
190
-#define TCG_TARGET_HAS_extract2_i32 0
191
-#define TCG_TARGET_HAS_negsetcond_i32 1
192
-#define TCG_TARGET_HAS_add2_i32 1
193
-#define TCG_TARGET_HAS_sub2_i32 1
194
-#define TCG_TARGET_HAS_mulu2_i32 0
195
-#define TCG_TARGET_HAS_muls2_i32 0
196
-#define TCG_TARGET_HAS_muluh_i32 0
197
-#define TCG_TARGET_HAS_mulsh_i32 0
198
-#define TCG_TARGET_HAS_extr_i64_i32 0
199
-#define TCG_TARGET_HAS_qemu_st8_i32 0
200
-
201
-#define TCG_TARGET_HAS_div2_i64 1
202
-#define TCG_TARGET_HAS_rot_i64 1
203
-#define TCG_TARGET_HAS_ext8s_i64 1
204
-#define TCG_TARGET_HAS_ext16s_i64 1
205
-#define TCG_TARGET_HAS_ext32s_i64 1
206
-#define TCG_TARGET_HAS_ext8u_i64 1
207
-#define TCG_TARGET_HAS_ext16u_i64 1
208
-#define TCG_TARGET_HAS_ext32u_i64 1
209
-#define TCG_TARGET_HAS_bswap16_i64 1
210
-#define TCG_TARGET_HAS_bswap32_i64 1
211
-#define TCG_TARGET_HAS_bswap64_i64 1
212
-#define TCG_TARGET_HAS_not_i64 HAVE_FACILITY(MISC_INSN_EXT3)
213
-#define TCG_TARGET_HAS_andc_i64 HAVE_FACILITY(MISC_INSN_EXT3)
214
-#define TCG_TARGET_HAS_orc_i64 HAVE_FACILITY(MISC_INSN_EXT3)
215
-#define TCG_TARGET_HAS_eqv_i64 HAVE_FACILITY(MISC_INSN_EXT3)
216
-#define TCG_TARGET_HAS_nand_i64 HAVE_FACILITY(MISC_INSN_EXT3)
217
-#define TCG_TARGET_HAS_nor_i64 HAVE_FACILITY(MISC_INSN_EXT3)
218
-#define TCG_TARGET_HAS_clz_i64 1
219
-#define TCG_TARGET_HAS_ctz_i64 0
220
-#define TCG_TARGET_HAS_ctpop_i64 1
221
-#define TCG_TARGET_HAS_deposit_i64 1
222
-#define TCG_TARGET_HAS_extract_i64 1
223
-#define TCG_TARGET_HAS_sextract_i64 0
224
-#define TCG_TARGET_HAS_extract2_i64 0
225
-#define TCG_TARGET_HAS_negsetcond_i64 1
226
-#define TCG_TARGET_HAS_add2_i64 1
227
-#define TCG_TARGET_HAS_sub2_i64 1
228
-#define TCG_TARGET_HAS_mulu2_i64 1
229
-#define TCG_TARGET_HAS_muls2_i64 HAVE_FACILITY(MISC_INSN_EXT2)
230
-#define TCG_TARGET_HAS_muluh_i64 0
231
-#define TCG_TARGET_HAS_mulsh_i64 0
232
-
233
-#define TCG_TARGET_HAS_qemu_ldst_i128 1
234
-
235
-#define TCG_TARGET_HAS_tst 1
236
-
237
-#define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR)
238
-#define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR)
239
-#define TCG_TARGET_HAS_v256 0
240
-
241
-#define TCG_TARGET_HAS_andc_vec 1
242
-#define TCG_TARGET_HAS_orc_vec HAVE_FACILITY(VECTOR_ENH1)
243
-#define TCG_TARGET_HAS_nand_vec HAVE_FACILITY(VECTOR_ENH1)
244
-#define TCG_TARGET_HAS_nor_vec 1
245
-#define TCG_TARGET_HAS_eqv_vec HAVE_FACILITY(VECTOR_ENH1)
246
-#define TCG_TARGET_HAS_not_vec 1
247
-#define TCG_TARGET_HAS_neg_vec 1
248
-#define TCG_TARGET_HAS_abs_vec 1
249
-#define TCG_TARGET_HAS_roti_vec 1
250
-#define TCG_TARGET_HAS_rots_vec 1
251
-#define TCG_TARGET_HAS_rotv_vec 1
252
-#define TCG_TARGET_HAS_shi_vec 1
253
-#define TCG_TARGET_HAS_shs_vec 1
254
-#define TCG_TARGET_HAS_shv_vec 1
255
-#define TCG_TARGET_HAS_mul_vec 1
256
-#define TCG_TARGET_HAS_sat_vec 0
257
-#define TCG_TARGET_HAS_minmax_vec 1
258
-#define TCG_TARGET_HAS_bitsel_vec 1
259
-#define TCG_TARGET_HAS_cmpsel_vec 1
260
-#define TCG_TARGET_HAS_tst_vec 0
261
+#include "tcg-target-has.h"
262
263
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
264
265
--
266
2.43.0
267
268
diff view generated by jsdifflib
New patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Message-ID: <20250108215156.8731-12-philmd@linaro.org>
4
---
5
tcg/sparc64/tcg-target-has.h | 86 ++++++++++++++++++++++++++++++++++++
6
tcg/sparc64/tcg-target.h | 78 +-------------------------------
7
2 files changed, 88 insertions(+), 76 deletions(-)
8
create mode 100644 tcg/sparc64/tcg-target-has.h
1
9
10
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
11
new file mode 100644
12
index XXXXXXX..XXXXXXX
13
--- /dev/null
14
+++ b/tcg/sparc64/tcg-target-has.h
15
@@ -XXX,XX +XXX,XX @@
16
+/* SPDX-License-Identifier: MIT */
17
+/*
18
+ * Define target-specific opcode support
19
+ * Copyright (c) 2008 Fabrice Bellard
20
+ */
21
+
22
+#ifndef TCG_TARGET_HAS_H
23
+#define TCG_TARGET_HAS_H
24
+
25
+#if defined(__VIS__) && __VIS__ >= 0x300
26
+#define use_vis3_instructions 1
27
+#else
28
+extern bool use_vis3_instructions;
29
+#endif
30
+
31
+/* optional instructions */
32
+#define TCG_TARGET_HAS_div_i32        1
33
+#define TCG_TARGET_HAS_rem_i32        0
34
+#define TCG_TARGET_HAS_rot_i32 0
35
+#define TCG_TARGET_HAS_ext8s_i32 0
36
+#define TCG_TARGET_HAS_ext16s_i32 0
37
+#define TCG_TARGET_HAS_ext8u_i32 0
38
+#define TCG_TARGET_HAS_ext16u_i32 0
39
+#define TCG_TARGET_HAS_bswap16_i32 0
40
+#define TCG_TARGET_HAS_bswap32_i32 0
41
+#define TCG_TARGET_HAS_not_i32 1
42
+#define TCG_TARGET_HAS_andc_i32 1
43
+#define TCG_TARGET_HAS_orc_i32 1
44
+#define TCG_TARGET_HAS_eqv_i32 0
45
+#define TCG_TARGET_HAS_nand_i32 0
46
+#define TCG_TARGET_HAS_nor_i32 0
47
+#define TCG_TARGET_HAS_clz_i32 0
48
+#define TCG_TARGET_HAS_ctz_i32 0
49
+#define TCG_TARGET_HAS_ctpop_i32 0
50
+#define TCG_TARGET_HAS_deposit_i32 0
51
+#define TCG_TARGET_HAS_extract_i32 0
52
+#define TCG_TARGET_HAS_sextract_i32 0
53
+#define TCG_TARGET_HAS_extract2_i32 0
54
+#define TCG_TARGET_HAS_negsetcond_i32 1
55
+#define TCG_TARGET_HAS_add2_i32 1
56
+#define TCG_TARGET_HAS_sub2_i32 1
57
+#define TCG_TARGET_HAS_mulu2_i32 1
58
+#define TCG_TARGET_HAS_muls2_i32 1
59
+#define TCG_TARGET_HAS_muluh_i32 0
60
+#define TCG_TARGET_HAS_mulsh_i32 0
61
+#define TCG_TARGET_HAS_qemu_st8_i32 0
62
+
63
+#define TCG_TARGET_HAS_extr_i64_i32 0
64
+#define TCG_TARGET_HAS_div_i64 1
65
+#define TCG_TARGET_HAS_rem_i64 0
66
+#define TCG_TARGET_HAS_rot_i64 0
67
+#define TCG_TARGET_HAS_ext8s_i64 0
68
+#define TCG_TARGET_HAS_ext16s_i64 0
69
+#define TCG_TARGET_HAS_ext32s_i64 1
70
+#define TCG_TARGET_HAS_ext8u_i64 0
71
+#define TCG_TARGET_HAS_ext16u_i64 0
72
+#define TCG_TARGET_HAS_ext32u_i64 1
73
+#define TCG_TARGET_HAS_bswap16_i64 0
74
+#define TCG_TARGET_HAS_bswap32_i64 0
75
+#define TCG_TARGET_HAS_bswap64_i64 0
76
+#define TCG_TARGET_HAS_not_i64 1
77
+#define TCG_TARGET_HAS_andc_i64 1
78
+#define TCG_TARGET_HAS_orc_i64 1
79
+#define TCG_TARGET_HAS_eqv_i64 0
80
+#define TCG_TARGET_HAS_nand_i64 0
81
+#define TCG_TARGET_HAS_nor_i64 0
82
+#define TCG_TARGET_HAS_clz_i64 0
83
+#define TCG_TARGET_HAS_ctz_i64 0
84
+#define TCG_TARGET_HAS_ctpop_i64 0
85
+#define TCG_TARGET_HAS_deposit_i64 0
86
+#define TCG_TARGET_HAS_extract_i64 0
87
+#define TCG_TARGET_HAS_sextract_i64 0
88
+#define TCG_TARGET_HAS_extract2_i64 0
89
+#define TCG_TARGET_HAS_negsetcond_i64 1
90
+#define TCG_TARGET_HAS_add2_i64 1
91
+#define TCG_TARGET_HAS_sub2_i64 1
92
+#define TCG_TARGET_HAS_mulu2_i64 0
93
+#define TCG_TARGET_HAS_muls2_i64 0
94
+#define TCG_TARGET_HAS_muluh_i64 use_vis3_instructions
95
+#define TCG_TARGET_HAS_mulsh_i64 0
96
+
97
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
98
+
99
+#define TCG_TARGET_HAS_tst 1
100
+
101
+#endif
102
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
103
index XXXXXXX..XXXXXXX 100644
104
--- a/tcg/sparc64/tcg-target.h
105
+++ b/tcg/sparc64/tcg-target.h
106
@@ -XXX,XX +XXX,XX @@ typedef enum {
107
TCG_REG_I7,
108
} TCGReg;
109
110
-#if defined(__VIS__) && __VIS__ >= 0x300
111
-#define use_vis3_instructions 1
112
-#else
113
-extern bool use_vis3_instructions;
114
-#endif
115
-
116
-/* optional instructions */
117
-#define TCG_TARGET_HAS_div_i32        1
118
-#define TCG_TARGET_HAS_rem_i32        0
119
-#define TCG_TARGET_HAS_rot_i32 0
120
-#define TCG_TARGET_HAS_ext8s_i32 0
121
-#define TCG_TARGET_HAS_ext16s_i32 0
122
-#define TCG_TARGET_HAS_ext8u_i32 0
123
-#define TCG_TARGET_HAS_ext16u_i32 0
124
-#define TCG_TARGET_HAS_bswap16_i32 0
125
-#define TCG_TARGET_HAS_bswap32_i32 0
126
-#define TCG_TARGET_HAS_not_i32 1
127
-#define TCG_TARGET_HAS_andc_i32 1
128
-#define TCG_TARGET_HAS_orc_i32 1
129
-#define TCG_TARGET_HAS_eqv_i32 0
130
-#define TCG_TARGET_HAS_nand_i32 0
131
-#define TCG_TARGET_HAS_nor_i32 0
132
-#define TCG_TARGET_HAS_clz_i32 0
133
-#define TCG_TARGET_HAS_ctz_i32 0
134
-#define TCG_TARGET_HAS_ctpop_i32 0
135
-#define TCG_TARGET_HAS_deposit_i32 0
136
-#define TCG_TARGET_HAS_extract_i32 0
137
-#define TCG_TARGET_HAS_sextract_i32 0
138
-#define TCG_TARGET_HAS_extract2_i32 0
139
-#define TCG_TARGET_HAS_negsetcond_i32 1
140
-#define TCG_TARGET_HAS_add2_i32 1
141
-#define TCG_TARGET_HAS_sub2_i32 1
142
-#define TCG_TARGET_HAS_mulu2_i32 1
143
-#define TCG_TARGET_HAS_muls2_i32 1
144
-#define TCG_TARGET_HAS_muluh_i32 0
145
-#define TCG_TARGET_HAS_mulsh_i32 0
146
-#define TCG_TARGET_HAS_qemu_st8_i32 0
147
-
148
-#define TCG_TARGET_HAS_extr_i64_i32 0
149
-#define TCG_TARGET_HAS_div_i64 1
150
-#define TCG_TARGET_HAS_rem_i64 0
151
-#define TCG_TARGET_HAS_rot_i64 0
152
-#define TCG_TARGET_HAS_ext8s_i64 0
153
-#define TCG_TARGET_HAS_ext16s_i64 0
154
-#define TCG_TARGET_HAS_ext32s_i64 1
155
-#define TCG_TARGET_HAS_ext8u_i64 0
156
-#define TCG_TARGET_HAS_ext16u_i64 0
157
-#define TCG_TARGET_HAS_ext32u_i64 1
158
-#define TCG_TARGET_HAS_bswap16_i64 0
159
-#define TCG_TARGET_HAS_bswap32_i64 0
160
-#define TCG_TARGET_HAS_bswap64_i64 0
161
-#define TCG_TARGET_HAS_not_i64 1
162
-#define TCG_TARGET_HAS_andc_i64 1
163
-#define TCG_TARGET_HAS_orc_i64 1
164
-#define TCG_TARGET_HAS_eqv_i64 0
165
-#define TCG_TARGET_HAS_nand_i64 0
166
-#define TCG_TARGET_HAS_nor_i64 0
167
-#define TCG_TARGET_HAS_clz_i64 0
168
-#define TCG_TARGET_HAS_ctz_i64 0
169
-#define TCG_TARGET_HAS_ctpop_i64 0
170
-#define TCG_TARGET_HAS_deposit_i64 0
171
-#define TCG_TARGET_HAS_extract_i64 0
172
-#define TCG_TARGET_HAS_sextract_i64 0
173
-#define TCG_TARGET_HAS_extract2_i64 0
174
-#define TCG_TARGET_HAS_negsetcond_i64 1
175
-#define TCG_TARGET_HAS_add2_i64 1
176
-#define TCG_TARGET_HAS_sub2_i64 1
177
-#define TCG_TARGET_HAS_mulu2_i64 0
178
-#define TCG_TARGET_HAS_muls2_i64 0
179
-#define TCG_TARGET_HAS_muluh_i64 use_vis3_instructions
180
-#define TCG_TARGET_HAS_mulsh_i64 0
181
-
182
-#define TCG_TARGET_HAS_qemu_ldst_i128 0
183
-
184
-#define TCG_TARGET_HAS_tst 1
185
-
186
#define TCG_AREG0 TCG_REG_I0
187
188
+#include "tcg-target-has.h"
189
+
190
#define TCG_TARGET_DEFAULT_MO (0)
191
192
#endif
193
--
194
2.43.0
195
196
diff view generated by jsdifflib
New patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Message-ID: <20250108215156.8731-13-philmd@linaro.org>
4
---
5
tcg/tci/tcg-target-has.h | 83 ++++++++++++++++++++++++++++++++++++++++
6
tcg/tci/tcg-target.h | 75 +-----------------------------------
7
2 files changed, 84 insertions(+), 74 deletions(-)
8
create mode 100644 tcg/tci/tcg-target-has.h
1
9
10
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
11
new file mode 100644
12
index XXXXXXX..XXXXXXX
13
--- /dev/null
14
+++ b/tcg/tci/tcg-target-has.h
15
@@ -XXX,XX +XXX,XX @@
16
+/* SPDX-License-Identifier: MIT */
17
+/*
18
+ * Define target-specific opcode support
19
+ * Copyright (c) 2009, 2011 Stefan Weil
20
+ */
21
+
22
+#ifndef TCG_TARGET_HAS_H
23
+#define TCG_TARGET_HAS_H
24
+
25
+#define TCG_TARGET_HAS_bswap16_i32 1
26
+#define TCG_TARGET_HAS_bswap32_i32 1
27
+#define TCG_TARGET_HAS_div_i32 1
28
+#define TCG_TARGET_HAS_rem_i32 1
29
+#define TCG_TARGET_HAS_ext8s_i32 1
30
+#define TCG_TARGET_HAS_ext16s_i32 1
31
+#define TCG_TARGET_HAS_ext8u_i32 1
32
+#define TCG_TARGET_HAS_ext16u_i32 1
33
+#define TCG_TARGET_HAS_andc_i32 1
34
+#define TCG_TARGET_HAS_deposit_i32 1
35
+#define TCG_TARGET_HAS_extract_i32 1
36
+#define TCG_TARGET_HAS_sextract_i32 1
37
+#define TCG_TARGET_HAS_extract2_i32 0
38
+#define TCG_TARGET_HAS_eqv_i32 1
39
+#define TCG_TARGET_HAS_nand_i32 1
40
+#define TCG_TARGET_HAS_nor_i32 1
41
+#define TCG_TARGET_HAS_clz_i32 1
42
+#define TCG_TARGET_HAS_ctz_i32 1
43
+#define TCG_TARGET_HAS_ctpop_i32 1
44
+#define TCG_TARGET_HAS_not_i32 1
45
+#define TCG_TARGET_HAS_orc_i32 1
46
+#define TCG_TARGET_HAS_rot_i32 1
47
+#define TCG_TARGET_HAS_negsetcond_i32 0
48
+#define TCG_TARGET_HAS_muls2_i32 1
49
+#define TCG_TARGET_HAS_muluh_i32 0
50
+#define TCG_TARGET_HAS_mulsh_i32 0
51
+#define TCG_TARGET_HAS_qemu_st8_i32 0
52
+
53
+#if TCG_TARGET_REG_BITS == 64
54
+#define TCG_TARGET_HAS_extr_i64_i32 0
55
+#define TCG_TARGET_HAS_bswap16_i64 1
56
+#define TCG_TARGET_HAS_bswap32_i64 1
57
+#define TCG_TARGET_HAS_bswap64_i64 1
58
+#define TCG_TARGET_HAS_deposit_i64 1
59
+#define TCG_TARGET_HAS_extract_i64 1
60
+#define TCG_TARGET_HAS_sextract_i64 1
61
+#define TCG_TARGET_HAS_extract2_i64 0
62
+#define TCG_TARGET_HAS_div_i64 1
63
+#define TCG_TARGET_HAS_rem_i64 1
64
+#define TCG_TARGET_HAS_ext8s_i64 1
65
+#define TCG_TARGET_HAS_ext16s_i64 1
66
+#define TCG_TARGET_HAS_ext32s_i64 1
67
+#define TCG_TARGET_HAS_ext8u_i64 1
68
+#define TCG_TARGET_HAS_ext16u_i64 1
69
+#define TCG_TARGET_HAS_ext32u_i64 1
70
+#define TCG_TARGET_HAS_andc_i64 1
71
+#define TCG_TARGET_HAS_eqv_i64 1
72
+#define TCG_TARGET_HAS_nand_i64 1
73
+#define TCG_TARGET_HAS_nor_i64 1
74
+#define TCG_TARGET_HAS_clz_i64 1
75
+#define TCG_TARGET_HAS_ctz_i64 1
76
+#define TCG_TARGET_HAS_ctpop_i64 1
77
+#define TCG_TARGET_HAS_not_i64 1
78
+#define TCG_TARGET_HAS_orc_i64 1
79
+#define TCG_TARGET_HAS_rot_i64 1
80
+#define TCG_TARGET_HAS_negsetcond_i64 0
81
+#define TCG_TARGET_HAS_muls2_i64 1
82
+#define TCG_TARGET_HAS_add2_i32 1
83
+#define TCG_TARGET_HAS_sub2_i32 1
84
+#define TCG_TARGET_HAS_mulu2_i32 1
85
+#define TCG_TARGET_HAS_add2_i64 1
86
+#define TCG_TARGET_HAS_sub2_i64 1
87
+#define TCG_TARGET_HAS_mulu2_i64 1
88
+#define TCG_TARGET_HAS_muluh_i64 0
89
+#define TCG_TARGET_HAS_mulsh_i64 0
90
+#else
91
+#define TCG_TARGET_HAS_mulu2_i32 1
92
+#endif /* TCG_TARGET_REG_BITS == 64 */
93
+
94
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
95
+
96
+#define TCG_TARGET_HAS_tst 1
97
+
98
+#endif
99
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
100
index XXXXXXX..XXXXXXX 100644
101
--- a/tcg/tci/tcg-target.h
102
+++ b/tcg/tci/tcg-target.h
103
@@ -XXX,XX +XXX,XX @@
104
#define TCG_TARGET_INSN_UNIT_SIZE 4
105
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
106
107
-/* Optional instructions. */
108
-
109
-#define TCG_TARGET_HAS_bswap16_i32 1
110
-#define TCG_TARGET_HAS_bswap32_i32 1
111
-#define TCG_TARGET_HAS_div_i32 1
112
-#define TCG_TARGET_HAS_rem_i32 1
113
-#define TCG_TARGET_HAS_ext8s_i32 1
114
-#define TCG_TARGET_HAS_ext16s_i32 1
115
-#define TCG_TARGET_HAS_ext8u_i32 1
116
-#define TCG_TARGET_HAS_ext16u_i32 1
117
-#define TCG_TARGET_HAS_andc_i32 1
118
-#define TCG_TARGET_HAS_deposit_i32 1
119
-#define TCG_TARGET_HAS_extract_i32 1
120
-#define TCG_TARGET_HAS_sextract_i32 1
121
-#define TCG_TARGET_HAS_extract2_i32 0
122
-#define TCG_TARGET_HAS_eqv_i32 1
123
-#define TCG_TARGET_HAS_nand_i32 1
124
-#define TCG_TARGET_HAS_nor_i32 1
125
-#define TCG_TARGET_HAS_clz_i32 1
126
-#define TCG_TARGET_HAS_ctz_i32 1
127
-#define TCG_TARGET_HAS_ctpop_i32 1
128
-#define TCG_TARGET_HAS_not_i32 1
129
-#define TCG_TARGET_HAS_orc_i32 1
130
-#define TCG_TARGET_HAS_rot_i32 1
131
-#define TCG_TARGET_HAS_negsetcond_i32 0
132
-#define TCG_TARGET_HAS_muls2_i32 1
133
-#define TCG_TARGET_HAS_muluh_i32 0
134
-#define TCG_TARGET_HAS_mulsh_i32 0
135
-#define TCG_TARGET_HAS_qemu_st8_i32 0
136
-
137
-#if TCG_TARGET_REG_BITS == 64
138
-#define TCG_TARGET_HAS_extr_i64_i32 0
139
-#define TCG_TARGET_HAS_bswap16_i64 1
140
-#define TCG_TARGET_HAS_bswap32_i64 1
141
-#define TCG_TARGET_HAS_bswap64_i64 1
142
-#define TCG_TARGET_HAS_deposit_i64 1
143
-#define TCG_TARGET_HAS_extract_i64 1
144
-#define TCG_TARGET_HAS_sextract_i64 1
145
-#define TCG_TARGET_HAS_extract2_i64 0
146
-#define TCG_TARGET_HAS_div_i64 1
147
-#define TCG_TARGET_HAS_rem_i64 1
148
-#define TCG_TARGET_HAS_ext8s_i64 1
149
-#define TCG_TARGET_HAS_ext16s_i64 1
150
-#define TCG_TARGET_HAS_ext32s_i64 1
151
-#define TCG_TARGET_HAS_ext8u_i64 1
152
-#define TCG_TARGET_HAS_ext16u_i64 1
153
-#define TCG_TARGET_HAS_ext32u_i64 1
154
-#define TCG_TARGET_HAS_andc_i64 1
155
-#define TCG_TARGET_HAS_eqv_i64 1
156
-#define TCG_TARGET_HAS_nand_i64 1
157
-#define TCG_TARGET_HAS_nor_i64 1
158
-#define TCG_TARGET_HAS_clz_i64 1
159
-#define TCG_TARGET_HAS_ctz_i64 1
160
-#define TCG_TARGET_HAS_ctpop_i64 1
161
-#define TCG_TARGET_HAS_not_i64 1
162
-#define TCG_TARGET_HAS_orc_i64 1
163
-#define TCG_TARGET_HAS_rot_i64 1
164
-#define TCG_TARGET_HAS_negsetcond_i64 0
165
-#define TCG_TARGET_HAS_muls2_i64 1
166
-#define TCG_TARGET_HAS_add2_i32 1
167
-#define TCG_TARGET_HAS_sub2_i32 1
168
-#define TCG_TARGET_HAS_mulu2_i32 1
169
-#define TCG_TARGET_HAS_add2_i64 1
170
-#define TCG_TARGET_HAS_sub2_i64 1
171
-#define TCG_TARGET_HAS_mulu2_i64 1
172
-#define TCG_TARGET_HAS_muluh_i64 0
173
-#define TCG_TARGET_HAS_mulsh_i64 0
174
-#else
175
-#define TCG_TARGET_HAS_mulu2_i32 1
176
-#endif /* TCG_TARGET_REG_BITS == 64 */
177
-
178
-#define TCG_TARGET_HAS_qemu_ldst_i128 0
179
-
180
-#define TCG_TARGET_HAS_tst 1
181
+#include "tcg-target-has.h"
182
183
/* Number of registers available. */
184
#define TCG_TARGET_NB_REGS 16
185
--
186
2.43.0
187
188
diff view generated by jsdifflib
New patch
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Message-ID: <20250108215156.8731-14-philmd@linaro.org>
4
---
5
tcg/aarch64/tcg-target.h | 2 --
6
tcg/arm/tcg-target.h | 2 --
7
tcg/i386/tcg-target.h | 2 --
8
tcg/loongarch64/tcg-target.h | 2 --
9
tcg/mips/tcg-target.h | 2 --
10
tcg/ppc/tcg-target.h | 2 --
11
tcg/riscv/tcg-target.h | 2 --
12
tcg/s390x/tcg-target.h | 2 --
13
tcg/sparc64/tcg-target.h | 2 --
14
tcg/tcg-has.h | 2 ++
15
tcg/tci/tcg-target.h | 2 --
16
11 files changed, 2 insertions(+), 20 deletions(-)
1
17
18
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/tcg/aarch64/tcg-target.h
21
+++ b/tcg/aarch64/tcg-target.h
22
@@ -XXX,XX +XXX,XX @@ typedef enum {
23
24
#define TCG_TARGET_NB_REGS 64
25
26
-#include "tcg-target-has.h"
27
-
28
#define TCG_TARGET_DEFAULT_MO (0)
29
30
#endif /* AARCH64_TCG_TARGET_H */
31
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/tcg/arm/tcg-target.h
34
+++ b/tcg/arm/tcg-target.h
35
@@ -XXX,XX +XXX,XX @@ typedef enum {
36
37
#define TCG_TARGET_NB_REGS 32
38
39
-#include "tcg-target-has.h"
40
-
41
#define TCG_TARGET_DEFAULT_MO (0)
42
43
#endif
44
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
45
index XXXXXXX..XXXXXXX 100644
46
--- a/tcg/i386/tcg-target.h
47
+++ b/tcg/i386/tcg-target.h
48
@@ -XXX,XX +XXX,XX @@ typedef enum {
49
TCG_REG_CALL_STACK = TCG_REG_ESP
50
} TCGReg;
51
52
-#include "tcg-target-has.h"
53
-
54
/* This defines the natural memory order supported by this
55
* architecture before guarantees made by various barrier
56
* instructions.
57
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
58
index XXXXXXX..XXXXXXX 100644
59
--- a/tcg/loongarch64/tcg-target.h
60
+++ b/tcg/loongarch64/tcg-target.h
61
@@ -XXX,XX +XXX,XX @@ typedef enum {
62
TCG_VEC_TMP0 = TCG_REG_V23,
63
} TCGReg;
64
65
-#include "tcg-target-has.h"
66
-
67
#define TCG_TARGET_DEFAULT_MO (0)
68
69
#endif /* LOONGARCH_TCG_TARGET_H */
70
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
71
index XXXXXXX..XXXXXXX 100644
72
--- a/tcg/mips/tcg-target.h
73
+++ b/tcg/mips/tcg-target.h
74
@@ -XXX,XX +XXX,XX @@ typedef enum {
75
TCG_AREG0 = TCG_REG_S8,
76
} TCGReg;
77
78
-#include "tcg-target-has.h"
79
-
80
#define TCG_TARGET_DEFAULT_MO 0
81
82
#endif
83
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
84
index XXXXXXX..XXXXXXX 100644
85
--- a/tcg/ppc/tcg-target.h
86
+++ b/tcg/ppc/tcg-target.h
87
@@ -XXX,XX +XXX,XX @@ typedef enum {
88
TCG_AREG0 = TCG_REG_R27
89
} TCGReg;
90
91
-#include "tcg-target-has.h"
92
-
93
#define TCG_TARGET_DEFAULT_MO (0)
94
95
#endif
96
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
97
index XXXXXXX..XXXXXXX 100644
98
--- a/tcg/riscv/tcg-target.h
99
+++ b/tcg/riscv/tcg-target.h
100
@@ -XXX,XX +XXX,XX @@ typedef enum {
101
TCG_REG_TMP2 = TCG_REG_T4,
102
} TCGReg;
103
104
-#include "tcg-target-has.h"
105
-
106
#define TCG_TARGET_DEFAULT_MO (0)
107
108
#endif
109
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
110
index XXXXXXX..XXXXXXX 100644
111
--- a/tcg/s390x/tcg-target.h
112
+++ b/tcg/s390x/tcg-target.h
113
@@ -XXX,XX +XXX,XX @@ typedef enum TCGReg {
114
115
#define TCG_TARGET_NB_REGS 64
116
117
-#include "tcg-target-has.h"
118
-
119
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
120
121
#endif
122
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
123
index XXXXXXX..XXXXXXX 100644
124
--- a/tcg/sparc64/tcg-target.h
125
+++ b/tcg/sparc64/tcg-target.h
126
@@ -XXX,XX +XXX,XX @@ typedef enum {
127
128
#define TCG_AREG0 TCG_REG_I0
129
130
-#include "tcg-target-has.h"
131
-
132
#define TCG_TARGET_DEFAULT_MO (0)
133
134
#endif
135
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
136
index XXXXXXX..XXXXXXX 100644
137
--- a/tcg/tcg-has.h
138
+++ b/tcg/tcg-has.h
139
@@ -XXX,XX +XXX,XX @@
140
#ifndef TCG_HAS_H
141
#define TCG_HAS_H
142
143
+#include "tcg-target-has.h"
144
+
145
#if TCG_TARGET_REG_BITS == 32
146
/* Turn some undef macros into false macros. */
147
#define TCG_TARGET_HAS_extr_i64_i32 0
148
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
149
index XXXXXXX..XXXXXXX 100644
150
--- a/tcg/tci/tcg-target.h
151
+++ b/tcg/tci/tcg-target.h
152
@@ -XXX,XX +XXX,XX @@
153
#define TCG_TARGET_INSN_UNIT_SIZE 4
154
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
155
156
-#include "tcg-target-has.h"
157
-
158
/* Number of registers available. */
159
#define TCG_TARGET_NB_REGS 16
160
161
--
162
2.43.0
163
164
diff view generated by jsdifflib
New patch
1
TCG_TARGET_HAS_* definitions don't need to be exposed
2
by "tcg/tcg.h". Only include 'tcg-has.h' when necessary.
1
3
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Message-ID: <20250108215156.8731-15-philmd@linaro.org>
7
---
8
include/tcg/tcg.h | 2 --
9
tcg/optimize.c | 1 +
10
tcg/tcg-common.c | 1 +
11
tcg/tcg-op-gvec.c | 1 +
12
tcg/tcg-op-ldst.c | 2 +-
13
tcg/tcg-op-vec.c | 1 +
14
tcg/tcg-op.c | 2 +-
15
tcg/tcg.c | 1 +
16
tcg/tci.c | 1 +
17
9 files changed, 8 insertions(+), 4 deletions(-)
18
19
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/include/tcg/tcg.h
22
+++ b/include/tcg/tcg.h
23
@@ -XXX,XX +XXX,XX @@ typedef uint64_t TCGRegSet;
24
#error unsupported
25
#endif
26
27
-#include "tcg/tcg-has.h"
28
-
29
typedef enum TCGOpcode {
30
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
31
#include "tcg/tcg-opc.h"
32
diff --git a/tcg/optimize.c b/tcg/optimize.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/tcg/optimize.c
35
+++ b/tcg/optimize.c
36
@@ -XXX,XX +XXX,XX @@
37
#include "qemu/interval-tree.h"
38
#include "tcg/tcg-op-common.h"
39
#include "tcg-internal.h"
40
+#include "tcg-has.h"
41
42
#define CASE_OP_32_64(x) \
43
glue(glue(case INDEX_op_, x), _i32): \
44
diff --git a/tcg/tcg-common.c b/tcg/tcg-common.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/tcg/tcg-common.c
47
+++ b/tcg/tcg-common.c
48
@@ -XXX,XX +XXX,XX @@
49
50
#include "qemu/osdep.h"
51
#include "tcg/tcg.h"
52
+#include "tcg-has.h"
53
54
TCGOpDef tcg_op_defs[] = {
55
#define DEF(s, oargs, iargs, cargs, flags) \
56
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
57
index XXXXXXX..XXXXXXX 100644
58
--- a/tcg/tcg-op-gvec.c
59
+++ b/tcg/tcg-op-gvec.c
60
@@ -XXX,XX +XXX,XX @@
61
#include "tcg/tcg-op-common.h"
62
#include "tcg/tcg-op-gvec-common.h"
63
#include "tcg/tcg-gvec-desc.h"
64
+#include "tcg-has.h"
65
66
#define MAX_UNROLL 4
67
68
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
69
index XXXXXXX..XXXXXXX 100644
70
--- a/tcg/tcg-op-ldst.c
71
+++ b/tcg/tcg-op-ldst.c
72
@@ -XXX,XX +XXX,XX @@
73
#include "exec/translation-block.h"
74
#include "exec/plugin-gen.h"
75
#include "tcg-internal.h"
76
-
77
+#include "tcg-has.h"
78
79
static void check_max_alignment(unsigned a_bits)
80
{
81
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
82
index XXXXXXX..XXXXXXX 100644
83
--- a/tcg/tcg-op-vec.c
84
+++ b/tcg/tcg-op-vec.c
85
@@ -XXX,XX +XXX,XX @@
86
#include "tcg/tcg-op-common.h"
87
#include "tcg/tcg-mo.h"
88
#include "tcg-internal.h"
89
+#include "tcg-has.h"
90
91
/*
92
* Vector optional opcode tracking.
93
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
94
index XXXXXXX..XXXXXXX 100644
95
--- a/tcg/tcg-op.c
96
+++ b/tcg/tcg-op.c
97
@@ -XXX,XX +XXX,XX @@
98
#include "exec/translation-block.h"
99
#include "exec/plugin-gen.h"
100
#include "tcg-internal.h"
101
-
102
+#include "tcg-has.h"
103
104
/*
105
* Encourage the compiler to tail-call to a function, rather than inlining.
106
diff --git a/tcg/tcg.c b/tcg/tcg.c
107
index XXXXXXX..XXXXXXX 100644
108
--- a/tcg/tcg.c
109
+++ b/tcg/tcg.c
110
@@ -XXX,XX +XXX,XX @@
111
#include "tcg/tcg-temp-internal.h"
112
#include "tcg-internal.h"
113
#include "tcg/perf.h"
114
+#include "tcg-has.h"
115
#ifdef CONFIG_USER_ONLY
116
#include "user/guest-base.h"
117
#endif
118
diff --git a/tcg/tci.c b/tcg/tci.c
119
index XXXXXXX..XXXXXXX 100644
120
--- a/tcg/tci.c
121
+++ b/tcg/tci.c
122
@@ -XXX,XX +XXX,XX @@
123
#include "tcg/helper-info.h"
124
#include "tcg/tcg-ldst.h"
125
#include "disas/dis-asm.h"
126
+#include "tcg-has.h"
127
#include <ffi.h>
128
129
130
--
131
2.43.0
132
133
diff view generated by jsdifflib
New patch
1
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
---
4
accel/tcg/internal-target.h | 1 +
5
tcg/aarch64/tcg-target-mo.h | 12 ++++++++++++
6
tcg/aarch64/tcg-target.h | 2 --
7
tcg/arm/tcg-target-mo.h | 13 +++++++++++++
8
tcg/arm/tcg-target.h | 2 --
9
tcg/i386/tcg-target-mo.h | 19 +++++++++++++++++++
10
tcg/i386/tcg-target.h | 11 -----------
11
tcg/loongarch64/tcg-target-mo.h | 12 ++++++++++++
12
tcg/loongarch64/tcg-target.h | 2 --
13
tcg/mips/tcg-target-mo.h | 13 +++++++++++++
14
tcg/mips/tcg-target.h | 2 --
15
tcg/ppc/tcg-target-mo.h | 12 ++++++++++++
16
tcg/ppc/tcg-target.h | 2 --
17
tcg/riscv/tcg-target-mo.h | 12 ++++++++++++
18
tcg/riscv/tcg-target.h | 2 --
19
tcg/s390x/tcg-target-mo.h | 12 ++++++++++++
20
tcg/s390x/tcg-target.h | 2 --
21
tcg/sparc64/tcg-target-mo.h | 12 ++++++++++++
22
tcg/sparc64/tcg-target.h | 2 --
23
tcg/tci/tcg-target-mo.h | 17 +++++++++++++++++
24
tcg/tci/tcg-target.h | 5 -----
25
tcg/tcg-op-ldst.c | 1 +
26
22 files changed, 136 insertions(+), 32 deletions(-)
27
create mode 100644 tcg/aarch64/tcg-target-mo.h
28
create mode 100644 tcg/arm/tcg-target-mo.h
29
create mode 100644 tcg/i386/tcg-target-mo.h
30
create mode 100644 tcg/loongarch64/tcg-target-mo.h
31
create mode 100644 tcg/mips/tcg-target-mo.h
32
create mode 100644 tcg/ppc/tcg-target-mo.h
33
create mode 100644 tcg/riscv/tcg-target-mo.h
34
create mode 100644 tcg/s390x/tcg-target-mo.h
35
create mode 100644 tcg/sparc64/tcg-target-mo.h
36
create mode 100644 tcg/tci/tcg-target-mo.h
1
37
38
diff --git a/accel/tcg/internal-target.h b/accel/tcg/internal-target.h
39
index XXXXXXX..XXXXXXX 100644
40
--- a/accel/tcg/internal-target.h
41
+++ b/accel/tcg/internal-target.h
42
@@ -XXX,XX +XXX,XX @@
43
#include "exec/exec-all.h"
44
#include "exec/translation-block.h"
45
#include "tb-internal.h"
46
+#include "tcg-target-mo.h"
47
48
/*
49
* Access to the various translations structures need to be serialised
50
diff --git a/tcg/aarch64/tcg-target-mo.h b/tcg/aarch64/tcg-target-mo.h
51
new file mode 100644
52
index XXXXXXX..XXXXXXX
53
--- /dev/null
54
+++ b/tcg/aarch64/tcg-target-mo.h
55
@@ -XXX,XX +XXX,XX @@
56
+/* SPDX-License-Identifier: GPL-2.0-or-later */
57
+/*
58
+ * Define target-specific memory model
59
+ * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH
60
+ */
61
+
62
+#ifndef TCG_TARGET_MO_H
63
+#define TCG_TARGET_MO_H
64
+
65
+#define TCG_TARGET_DEFAULT_MO 0
66
+
67
+#endif
68
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
69
index XXXXXXX..XXXXXXX 100644
70
--- a/tcg/aarch64/tcg-target.h
71
+++ b/tcg/aarch64/tcg-target.h
72
@@ -XXX,XX +XXX,XX @@ typedef enum {
73
74
#define TCG_TARGET_NB_REGS 64
75
76
-#define TCG_TARGET_DEFAULT_MO (0)
77
-
78
#endif /* AARCH64_TCG_TARGET_H */
79
diff --git a/tcg/arm/tcg-target-mo.h b/tcg/arm/tcg-target-mo.h
80
new file mode 100644
81
index XXXXXXX..XXXXXXX
82
--- /dev/null
83
+++ b/tcg/arm/tcg-target-mo.h
84
@@ -XXX,XX +XXX,XX @@
85
+/* SPDX-License-Identifier: MIT */
86
+/*
87
+ * Define target-specific memory model
88
+ * Copyright (c) 2008 Fabrice Bellard
89
+ * Copyright (c) 2008 Andrzej Zaborowski
90
+ */
91
+
92
+#ifndef TCG_TARGET_MO_H
93
+#define TCG_TARGET_MO_H
94
+
95
+#define TCG_TARGET_DEFAULT_MO 0
96
+
97
+#endif
98
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
99
index XXXXXXX..XXXXXXX 100644
100
--- a/tcg/arm/tcg-target.h
101
+++ b/tcg/arm/tcg-target.h
102
@@ -XXX,XX +XXX,XX @@ typedef enum {
103
104
#define TCG_TARGET_NB_REGS 32
105
106
-#define TCG_TARGET_DEFAULT_MO (0)
107
-
108
#endif
109
diff --git a/tcg/i386/tcg-target-mo.h b/tcg/i386/tcg-target-mo.h
110
new file mode 100644
111
index XXXXXXX..XXXXXXX
112
--- /dev/null
113
+++ b/tcg/i386/tcg-target-mo.h
114
@@ -XXX,XX +XXX,XX @@
115
+/* SPDX-License-Identifier: MIT */
116
+/*
117
+ * Define target-specific memory model
118
+ * Copyright (c) 2008 Fabrice Bellard
119
+ */
120
+
121
+#ifndef TCG_TARGET_MO_H
122
+#define TCG_TARGET_MO_H
123
+
124
+/*
125
+ * This defines the natural memory order supported by this architecture
126
+ * before guarantees made by various barrier instructions.
127
+ *
128
+ * The x86 has a pretty strong memory ordering which only really
129
+ * allows for some stores to be re-ordered after loads.
130
+ */
131
+#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
132
+
133
+#endif
134
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
135
index XXXXXXX..XXXXXXX 100644
136
--- a/tcg/i386/tcg-target.h
137
+++ b/tcg/i386/tcg-target.h
138
@@ -XXX,XX +XXX,XX @@ typedef enum {
139
TCG_REG_CALL_STACK = TCG_REG_ESP
140
} TCGReg;
141
142
-/* This defines the natural memory order supported by this
143
- * architecture before guarantees made by various barrier
144
- * instructions.
145
- *
146
- * The x86 has a pretty strong memory ordering which only really
147
- * allows for some stores to be re-ordered after loads.
148
- */
149
-#include "tcg/tcg-mo.h"
150
-
151
-#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
152
-
153
#endif
154
diff --git a/tcg/loongarch64/tcg-target-mo.h b/tcg/loongarch64/tcg-target-mo.h
155
new file mode 100644
156
index XXXXXXX..XXXXXXX
157
--- /dev/null
158
+++ b/tcg/loongarch64/tcg-target-mo.h
159
@@ -XXX,XX +XXX,XX @@
160
+/* SPDX-License-Identifier: MIT */
161
+/*
162
+ * Define target-specific memory model
163
+ * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
164
+ */
165
+
166
+#ifndef TCG_TARGET_MO_H
167
+#define TCG_TARGET_MO_H
168
+
169
+#define TCG_TARGET_DEFAULT_MO 0
170
+
171
+#endif
172
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
173
index XXXXXXX..XXXXXXX 100644
174
--- a/tcg/loongarch64/tcg-target.h
175
+++ b/tcg/loongarch64/tcg-target.h
176
@@ -XXX,XX +XXX,XX @@ typedef enum {
177
TCG_VEC_TMP0 = TCG_REG_V23,
178
} TCGReg;
179
180
-#define TCG_TARGET_DEFAULT_MO (0)
181
-
182
#endif /* LOONGARCH_TCG_TARGET_H */
183
diff --git a/tcg/mips/tcg-target-mo.h b/tcg/mips/tcg-target-mo.h
184
new file mode 100644
185
index XXXXXXX..XXXXXXX
186
--- /dev/null
187
+++ b/tcg/mips/tcg-target-mo.h
188
@@ -XXX,XX +XXX,XX @@
189
+/* SPDX-License-Identifier: MIT */
190
+/*
191
+ * Define target-specific memory model
192
+ * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
193
+ * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
194
+ */
195
+
196
+#ifndef TCG_TARGET_MO_H
197
+#define TCG_TARGET_MO_H
198
+
199
+#define TCG_TARGET_DEFAULT_MO 0
200
+
201
+#endif
202
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
203
index XXXXXXX..XXXXXXX 100644
204
--- a/tcg/mips/tcg-target.h
205
+++ b/tcg/mips/tcg-target.h
206
@@ -XXX,XX +XXX,XX @@ typedef enum {
207
TCG_AREG0 = TCG_REG_S8,
208
} TCGReg;
209
210
-#define TCG_TARGET_DEFAULT_MO 0
211
-
212
#endif
213
diff --git a/tcg/ppc/tcg-target-mo.h b/tcg/ppc/tcg-target-mo.h
214
new file mode 100644
215
index XXXXXXX..XXXXXXX
216
--- /dev/null
217
+++ b/tcg/ppc/tcg-target-mo.h
218
@@ -XXX,XX +XXX,XX @@
219
+/* SPDX-License-Identifier: MIT */
220
+/*
221
+ * Define target-specific memory model
222
+ * Copyright (c) 2008 Fabrice Bellard
223
+ */
224
+
225
+#ifndef TCG_TARGET_MO_H
226
+#define TCG_TARGET_MO_H
227
+
228
+#define TCG_TARGET_DEFAULT_MO 0
229
+
230
+#endif
231
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
232
index XXXXXXX..XXXXXXX 100644
233
--- a/tcg/ppc/tcg-target.h
234
+++ b/tcg/ppc/tcg-target.h
235
@@ -XXX,XX +XXX,XX @@ typedef enum {
236
TCG_AREG0 = TCG_REG_R27
237
} TCGReg;
238
239
-#define TCG_TARGET_DEFAULT_MO (0)
240
-
241
#endif
242
diff --git a/tcg/riscv/tcg-target-mo.h b/tcg/riscv/tcg-target-mo.h
243
new file mode 100644
244
index XXXXXXX..XXXXXXX
245
--- /dev/null
246
+++ b/tcg/riscv/tcg-target-mo.h
247
@@ -XXX,XX +XXX,XX @@
248
+/* SPDX-License-Identifier: MIT */
249
+/*
250
+ * Define target-specific memory model
251
+ * Copyright (c) 2018 SiFive, Inc
252
+ */
253
+
254
+#ifndef TCG_TARGET_MO_H
255
+#define TCG_TARGET_MO_H
256
+
257
+#define TCG_TARGET_DEFAULT_MO 0
258
+
259
+#endif
260
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
261
index XXXXXXX..XXXXXXX 100644
262
--- a/tcg/riscv/tcg-target.h
263
+++ b/tcg/riscv/tcg-target.h
264
@@ -XXX,XX +XXX,XX @@ typedef enum {
265
TCG_REG_TMP2 = TCG_REG_T4,
266
} TCGReg;
267
268
-#define TCG_TARGET_DEFAULT_MO (0)
269
-
270
#endif
271
diff --git a/tcg/s390x/tcg-target-mo.h b/tcg/s390x/tcg-target-mo.h
272
new file mode 100644
273
index XXXXXXX..XXXXXXX
274
--- /dev/null
275
+++ b/tcg/s390x/tcg-target-mo.h
276
@@ -XXX,XX +XXX,XX @@
277
+/* SPDX-License-Identifier: MIT */
278
+/*
279
+ * Define target-specific memory model
280
+ * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
281
+ */
282
+
283
+#ifndef TCG_TARGET_MO_H
284
+#define TCG_TARGET_MO_H
285
+
286
+#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
287
+
288
+#endif
289
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
290
index XXXXXXX..XXXXXXX 100644
291
--- a/tcg/s390x/tcg-target.h
292
+++ b/tcg/s390x/tcg-target.h
293
@@ -XXX,XX +XXX,XX @@ typedef enum TCGReg {
294
295
#define TCG_TARGET_NB_REGS 64
296
297
-#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
298
-
299
#endif
300
diff --git a/tcg/sparc64/tcg-target-mo.h b/tcg/sparc64/tcg-target-mo.h
301
new file mode 100644
302
index XXXXXXX..XXXXXXX
303
--- /dev/null
304
+++ b/tcg/sparc64/tcg-target-mo.h
305
@@ -XXX,XX +XXX,XX @@
306
+/* SPDX-License-Identifier: MIT */
307
+/*
308
+ * Define target-specific memory model
309
+ * Copyright (c) 2008 Fabrice Bellard
310
+ */
311
+
312
+#ifndef TCG_TARGET_MO_H
313
+#define TCG_TARGET_MO_H
314
+
315
+#define TCG_TARGET_DEFAULT_MO 0
316
+
317
+#endif
318
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
319
index XXXXXXX..XXXXXXX 100644
320
--- a/tcg/sparc64/tcg-target.h
321
+++ b/tcg/sparc64/tcg-target.h
322
@@ -XXX,XX +XXX,XX @@ typedef enum {
323
324
#define TCG_AREG0 TCG_REG_I0
325
326
-#define TCG_TARGET_DEFAULT_MO (0)
327
-
328
#endif
329
diff --git a/tcg/tci/tcg-target-mo.h b/tcg/tci/tcg-target-mo.h
330
new file mode 100644
331
index XXXXXXX..XXXXXXX
332
--- /dev/null
333
+++ b/tcg/tci/tcg-target-mo.h
334
@@ -XXX,XX +XXX,XX @@
335
+/* SPDX-License-Identifier: MIT */
336
+/*
337
+ * Define target-specific memory model
338
+ * Copyright (c) 2009, 2011 Stefan Weil
339
+ */
340
+
341
+#ifndef TCG_TARGET_MO_H
342
+#define TCG_TARGET_MO_H
343
+
344
+/*
345
+ * We could notice __i386__ or __s390x__ and reduce the barriers depending
346
+ * on the host. But if you want performance, you use the normal backend.
347
+ * We prefer consistency across hosts on this.
348
+ */
349
+#define TCG_TARGET_DEFAULT_MO 0
350
+
351
+#endif
352
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
353
index XXXXXXX..XXXXXXX 100644
354
--- a/tcg/tci/tcg-target.h
355
+++ b/tcg/tci/tcg-target.h
356
@@ -XXX,XX +XXX,XX @@ typedef enum {
357
#define HAVE_TCG_QEMU_TB_EXEC
358
#define TCG_TARGET_NEED_POOL_LABELS
359
360
-/* We could notice __i386__ or __s390x__ and reduce the barriers depending
361
- on the host. But if you want performance, you use the normal backend.
362
- We prefer consistency across hosts on this. */
363
-#define TCG_TARGET_DEFAULT_MO (0)
364
-
365
#endif /* TCG_TARGET_H */
366
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
367
index XXXXXXX..XXXXXXX 100644
368
--- a/tcg/tcg-op-ldst.c
369
+++ b/tcg/tcg-op-ldst.c
370
@@ -XXX,XX +XXX,XX @@
371
#include "exec/plugin-gen.h"
372
#include "tcg-internal.h"
373
#include "tcg-has.h"
374
+#include "tcg-target-mo.h"
375
376
static void check_max_alignment(unsigned a_bits)
377
{
378
--
379
2.43.0
380
381
diff view generated by jsdifflib
1
This structure will shortly contain more than just
1
Return C_NotImplemented instead of asserting for opcodes
2
data for accessing MMIO. Rename the 'addr' member
2
not implemented by the backend. For now, the assertion
3
to 'xlat_section' to more clearly indicate its purpose.
3
moves to process_op_defs.
4
4
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
7
---
10
include/exec/cpu-defs.h | 22 ++++----
8
tcg/tcg.c | 10 ++++++----
11
accel/tcg/cputlb.c | 102 +++++++++++++++++++------------------
9
tcg/aarch64/tcg-target.c.inc | 2 +-
12
target/arm/mte_helper.c | 14 ++---
10
tcg/arm/tcg-target.c.inc | 2 +-
13
target/arm/sve_helper.c | 4 +-
11
tcg/i386/tcg-target.c.inc | 2 +-
14
target/arm/translate-a64.c | 2 +-
12
tcg/loongarch64/tcg-target.c.inc | 2 +-
15
5 files changed, 73 insertions(+), 71 deletions(-)
13
tcg/mips/tcg-target.c.inc | 2 +-
14
tcg/ppc/tcg-target.c.inc | 2 +-
15
tcg/riscv/tcg-target.c.inc | 2 +-
16
tcg/s390x/tcg-target.c.inc | 2 +-
17
tcg/sparc64/tcg-target.c.inc | 2 +-
18
tcg/tci/tcg-target.c.inc | 2 +-
19
11 files changed, 16 insertions(+), 14 deletions(-)
16
20
17
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
21
diff --git a/tcg/tcg.c b/tcg/tcg.c
18
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
19
--- a/include/exec/cpu-defs.h
23
--- a/tcg/tcg.c
20
+++ b/include/exec/cpu-defs.h
24
+++ b/tcg/tcg.c
21
@@ -XXX,XX +XXX,XX @@ typedef uint64_t target_ulong;
25
@@ -XXX,XX +XXX,XX @@ static int tcg_out_pool_finalize(TCGContext *s)
22
# endif
26
#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) C_PFX6(c_n1_o1_i4_, O1, O2, I1, I2, I3, I4),
23
# endif
27
24
28
typedef enum {
25
+/* Minimalized TLB entry for use by TCG fast path. */
29
+ C_NotImplemented = -1,
26
typedef struct CPUTLBEntry {
30
#include "tcg-target-con-set.h"
27
/* bit TARGET_LONG_BITS to TARGET_PAGE_BITS : virtual address
31
} TCGConstraintSetIndex;
28
bit TARGET_PAGE_BITS-1..4 : Nonzero for accesses that should not
32
29
@@ -XXX,XX +XXX,XX @@ typedef struct CPUTLBEntry {
33
@@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s)
30
34
const TCGTargetOpDef *tdefs;
31
QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
35
bool saw_alias_pair = false;
32
36
int i, o, i2, o2, nb_args;
33
-/* The IOTLB is not accessed directly inline by generated TCG code,
37
+ TCGConstraintSetIndex con_set;
34
- * so the CPUIOTLBEntry layout is not as critical as that of the
38
35
- * CPUTLBEntry. (This is also why we don't want to combine the two
39
if (def->flags & TCG_OPF_NOT_PRESENT) {
36
- * structs into one.)
40
continue;
37
+/*
41
@@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s)
38
+ * The full TLB entry, which is not accessed by generated TCG code,
42
39
+ * so the layout is not as critical as that of CPUTLBEntry. This is
43
/*
40
+ * also why we don't want to combine the two structs.
44
* Macro magic should make it impossible, but double-check that
41
*/
45
- * the array index is in range. Since the signness of an enum
42
-typedef struct CPUIOTLBEntry {
46
- * is implementation defined, force the result to unsigned.
43
+typedef struct CPUTLBEntryFull {
47
+ * the array index is in range. At the same time, double-check
44
/*
48
+ * that the opcode is implemented, i.e. not C_NotImplemented.
45
- * @addr contains:
49
*/
46
+ * @xlat_section contains:
50
- unsigned con_set = tcg_target_op_def(op);
47
* - in the lower TARGET_PAGE_BITS, a physical section number
51
- tcg_debug_assert(con_set < ARRAY_SIZE(constraint_sets));
48
* - with the lower TARGET_PAGE_BITS masked off, an offset which
52
+ con_set = tcg_target_op_def(op);
49
* must be added to the virtual address to obtain:
53
+ tcg_debug_assert(con_set >= 0 && con_set < ARRAY_SIZE(constraint_sets));
50
@@ -XXX,XX +XXX,XX @@ typedef struct CPUIOTLBEntry {
54
tdefs = &constraint_sets[con_set];
51
* number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
55
52
* + the offset within the target MemoryRegion (otherwise)
56
for (i = 0; i < nb_args; i++) {
53
*/
57
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
54
- hwaddr addr;
55
+ hwaddr xlat_section;
56
MemTxAttrs attrs;
57
-} CPUIOTLBEntry;
58
+} CPUTLBEntryFull;
59
60
/*
61
* Data elements that are per MMU mode, minus the bits accessed by
62
@@ -XXX,XX +XXX,XX @@ typedef struct CPUTLBDesc {
63
size_t vindex;
64
/* The tlb victim table, in two parts. */
65
CPUTLBEntry vtable[CPU_VTLB_SIZE];
66
- CPUIOTLBEntry viotlb[CPU_VTLB_SIZE];
67
- /* The iotlb. */
68
- CPUIOTLBEntry *iotlb;
69
+ CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
70
+ CPUTLBEntryFull *fulltlb;
71
} CPUTLBDesc;
72
73
/*
74
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
75
index XXXXXXX..XXXXXXX 100644
58
index XXXXXXX..XXXXXXX 100644
76
--- a/accel/tcg/cputlb.c
59
--- a/tcg/aarch64/tcg-target.c.inc
77
+++ b/accel/tcg/cputlb.c
60
+++ b/tcg/aarch64/tcg-target.c.inc
78
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
61
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
79
}
62
return C_O1_I2(w, 0, w);
80
63
81
g_free(fast->table);
64
default:
82
- g_free(desc->iotlb);
65
- g_assert_not_reached();
83
+ g_free(desc->fulltlb);
66
+ return C_NotImplemented;
84
85
tlb_window_reset(desc, now, 0);
86
/* desc->n_used_entries is cleared by the caller */
87
fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
88
fast->table = g_try_new(CPUTLBEntry, new_size);
89
- desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
90
+ desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
91
92
/*
93
* If the allocations fail, try smaller sizes. We just freed some
94
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
95
* allocations to fail though, so we progressively reduce the allocation
96
* size, aborting if we cannot even allocate the smallest TLB we support.
97
*/
98
- while (fast->table == NULL || desc->iotlb == NULL) {
99
+ while (fast->table == NULL || desc->fulltlb == NULL) {
100
if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
101
error_report("%s: %s", __func__, strerror(errno));
102
abort();
103
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
104
fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
105
106
g_free(fast->table);
107
- g_free(desc->iotlb);
108
+ g_free(desc->fulltlb);
109
fast->table = g_try_new(CPUTLBEntry, new_size);
110
- desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
111
+ desc->fulltlb = g_try_new(CPUTLBEntryFull, new_size);
112
}
67
}
113
}
68
}
114
69
115
@@ -XXX,XX +XXX,XX @@ static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
70
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
116
desc->n_used_entries = 0;
71
index XXXXXXX..XXXXXXX 100644
117
fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
72
--- a/tcg/arm/tcg-target.c.inc
118
fast->table = g_new(CPUTLBEntry, n_entries);
73
+++ b/tcg/arm/tcg-target.c.inc
119
- desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
74
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
120
+ desc->fulltlb = g_new(CPUTLBEntryFull, n_entries);
75
case INDEX_op_bitsel_vec:
121
tlb_mmu_flush_locked(desc, fast);
76
return C_O1_I3(w, w, w, w);
122
}
77
default:
123
78
- g_assert_not_reached();
124
@@ -XXX,XX +XXX,XX @@ void tlb_destroy(CPUState *cpu)
79
+ return C_NotImplemented;
125
CPUTLBDescFast *fast = &env_tlb(env)->f[i];
126
127
g_free(fast->table);
128
- g_free(desc->iotlb);
129
+ g_free(desc->fulltlb);
130
}
80
}
131
}
81
}
132
82
133
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
83
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
134
84
index XXXXXXX..XXXXXXX 100644
135
/* Evict the old entry into the victim tlb. */
85
--- a/tcg/i386/tcg-target.c.inc
136
copy_tlb_helper_locked(tv, te);
86
+++ b/tcg/i386/tcg-target.c.inc
137
- desc->viotlb[vidx] = desc->iotlb[index];
87
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
138
+ desc->vfulltlb[vidx] = desc->fulltlb[index];
88
return C_O1_I4(x, x, x, xO, x);
139
tlb_n_used_entries_dec(env, mmu_idx);
89
140
}
90
default:
141
91
- g_assert_not_reached();
142
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
92
+ return C_NotImplemented;
143
* subtract here is that of the page base, and not the same as the
144
* vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
145
*/
146
- desc->iotlb[index].addr = iotlb - vaddr_page;
147
- desc->iotlb[index].attrs = attrs;
148
+ desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
149
+ desc->fulltlb[index].attrs = attrs;
150
151
/* Now calculate the new entry */
152
tn.addend = addend - vaddr_page;
153
@@ -XXX,XX +XXX,XX @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
154
}
93
}
155
}
94
}
156
95
157
-static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
96
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
158
+static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
97
index XXXXXXX..XXXXXXX 100644
159
int mmu_idx, target_ulong addr, uintptr_t retaddr,
98
--- a/tcg/loongarch64/tcg-target.c.inc
160
MMUAccessType access_type, MemOp op)
99
+++ b/tcg/loongarch64/tcg-target.c.inc
161
{
100
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
162
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
101
return C_O1_I3(w, w, w, w);
163
bool locked = false;
102
164
MemTxResult r;
103
default:
165
104
- g_assert_not_reached();
166
- section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
105
+ return C_NotImplemented;
167
+ section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
168
mr = section->mr;
169
- mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
170
+ mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
171
cpu->mem_io_pc = retaddr;
172
if (!cpu->can_do_io) {
173
cpu_io_recompile(cpu, retaddr);
174
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
175
qemu_mutex_lock_iothread();
176
locked = true;
177
}
106
}
178
- r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
107
}
179
+ r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
108
180
if (r != MEMTX_OK) {
109
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
181
hwaddr physaddr = mr_offset +
110
index XXXXXXX..XXXXXXX 100644
182
section->offset_within_address_space -
111
--- a/tcg/mips/tcg-target.c.inc
183
section->offset_within_region;
112
+++ b/tcg/mips/tcg-target.c.inc
184
113
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
185
cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
114
: C_O0_I4(rZ, rZ, r, r));
186
- mmu_idx, iotlbentry->attrs, r, retaddr);
115
187
+ mmu_idx, full->attrs, r, retaddr);
116
default:
117
- g_assert_not_reached();
118
+ return C_NotImplemented;
188
}
119
}
189
if (locked) {
190
qemu_mutex_unlock_iothread();
191
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
192
}
120
}
193
121
194
/*
122
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
195
- * Save a potentially trashed IOTLB entry for later lookup by plugin.
123
index XXXXXXX..XXXXXXX 100644
196
- * This is read by tlb_plugin_lookup if the iotlb entry doesn't match
124
--- a/tcg/ppc/tcg-target.c.inc
197
+ * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
125
+++ b/tcg/ppc/tcg-target.c.inc
198
+ * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
126
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
199
* because of the side effect of io_writex changing memory layout.
127
return C_O1_I4(v, v, v, vZM, v);
200
*/
128
201
static void save_iotlb_data(CPUState *cs, hwaddr addr,
129
default:
202
@@ -XXX,XX +XXX,XX @@ static void save_iotlb_data(CPUState *cs, hwaddr addr,
130
- g_assert_not_reached();
203
#endif
131
+ return C_NotImplemented;
132
}
204
}
133
}
205
134
206
-static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
135
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
207
+static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
136
index XXXXXXX..XXXXXXX 100644
208
int mmu_idx, uint64_t val, target_ulong addr,
137
--- a/tcg/riscv/tcg-target.c.inc
209
uintptr_t retaddr, MemOp op)
138
+++ b/tcg/riscv/tcg-target.c.inc
210
{
139
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
211
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
140
case INDEX_op_cmpsel_vec:
212
bool locked = false;
141
return C_O1_I4(v, v, vL, vK, vK);
213
MemTxResult r;
142
default:
214
143
- g_assert_not_reached();
215
- section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
144
+ return C_NotImplemented;
216
+ section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
217
mr = section->mr;
218
- mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
219
+ mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
220
if (!cpu->can_do_io) {
221
cpu_io_recompile(cpu, retaddr);
222
}
145
}
223
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
146
}
224
* The memory_region_dispatch may trigger a flush/resize
147
225
* so for plugins we save the iotlb_data just in case.
148
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
226
*/
149
index XXXXXXX..XXXXXXX 100644
227
- save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset);
150
--- a/tcg/s390x/tcg-target.c.inc
228
+ save_iotlb_data(cpu, full->xlat_section, section, mr_offset);
151
+++ b/tcg/s390x/tcg-target.c.inc
229
152
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
230
if (!qemu_mutex_iothread_locked()) {
153
: C_O1_I4(v, v, v, vZ, v));
231
qemu_mutex_lock_iothread();
154
232
locked = true;
155
default:
156
- g_assert_not_reached();
157
+ return C_NotImplemented;
233
}
158
}
234
- r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
159
}
235
+ r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
160
236
if (r != MEMTX_OK) {
161
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
237
hwaddr physaddr = mr_offset +
162
index XXXXXXX..XXXXXXX 100644
238
section->offset_within_address_space -
163
--- a/tcg/sparc64/tcg-target.c.inc
239
section->offset_within_region;
164
+++ b/tcg/sparc64/tcg-target.c.inc
240
165
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
241
cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
166
return C_O1_I2(r, r, r);
242
- MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
167
243
+ MMU_DATA_STORE, mmu_idx, full->attrs, r,
168
default:
244
retaddr);
169
- g_assert_not_reached();
170
+ return C_NotImplemented;
245
}
171
}
246
if (locked) {
172
}
247
@@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
173
248
copy_tlb_helper_locked(vtlb, &tmptlb);
174
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
249
qemu_spin_unlock(&env_tlb(env)->c.lock);
175
index XXXXXXX..XXXXXXX 100644
250
176
--- a/tcg/tci/tcg-target.c.inc
251
- CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
177
+++ b/tcg/tci/tcg-target.c.inc
252
- CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
178
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
253
- tmpio = *io; *io = *vio; *vio = tmpio;
179
return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r);
254
+ CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
180
255
+ CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
181
default:
256
+ CPUTLBEntryFull tmpf;
182
- g_assert_not_reached();
257
+ tmpf = *f1; *f1 = *f2; *f2 = tmpf;
183
+ return C_NotImplemented;
258
return true;
259
}
260
}
184
}
261
@@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
262
(ADDR) & TARGET_PAGE_MASK)
263
264
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
265
- CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
266
+ CPUTLBEntryFull *full, uintptr_t retaddr)
267
{
268
- ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
269
+ ram_addr_t ram_addr = mem_vaddr + full->xlat_section;
270
271
trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
272
273
@@ -XXX,XX +XXX,XX @@ int probe_access_flags(CPUArchState *env, target_ulong addr,
274
/* Handle clean RAM pages. */
275
if (unlikely(flags & TLB_NOTDIRTY)) {
276
uintptr_t index = tlb_index(env, mmu_idx, addr);
277
- CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
278
+ CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
279
280
- notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
281
+ notdirty_write(env_cpu(env), addr, 1, full, retaddr);
282
flags &= ~TLB_NOTDIRTY;
283
}
284
285
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
286
287
if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
288
uintptr_t index = tlb_index(env, mmu_idx, addr);
289
- CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
290
+ CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
291
292
/* Handle watchpoints. */
293
if (flags & TLB_WATCHPOINT) {
294
int wp_access = (access_type == MMU_DATA_STORE
295
? BP_MEM_WRITE : BP_MEM_READ);
296
cpu_check_watchpoint(env_cpu(env), addr, size,
297
- iotlbentry->attrs, wp_access, retaddr);
298
+ full->attrs, wp_access, retaddr);
299
}
300
301
/* Handle clean RAM pages. */
302
if (flags & TLB_NOTDIRTY) {
303
- notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
304
+ notdirty_write(env_cpu(env), addr, 1, full, retaddr);
305
}
306
}
307
308
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
309
* should have just filled the TLB. The one corner case is io_writex
310
* which can cause TLB flushes and potential resizing of the TLBs
311
* losing the information we need. In those cases we need to recover
312
- * data from a copy of the iotlbentry. As long as this always occurs
313
+ * data from a copy of the CPUTLBEntryFull. As long as this always occurs
314
* from the same thread (which a mem callback will be) this is safe.
315
*/
316
317
@@ -XXX,XX +XXX,XX @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
318
if (likely(tlb_hit(tlb_addr, addr))) {
319
/* We must have an iotlb entry for MMIO */
320
if (tlb_addr & TLB_MMIO) {
321
- CPUIOTLBEntry *iotlbentry;
322
- iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
323
+ CPUTLBEntryFull *full;
324
+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
325
data->is_io = true;
326
- data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
327
- data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
328
+ data->v.io.section =
329
+ iotlb_to_section(cpu, full->xlat_section, full->attrs);
330
+ data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
331
} else {
332
data->is_io = false;
333
data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
334
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
335
336
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
337
notdirty_write(env_cpu(env), addr, size,
338
- &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
339
+ &env_tlb(env)->d[mmu_idx].fulltlb[index], retaddr);
340
}
341
342
return hostaddr;
343
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
344
345
/* Handle anything that isn't just a straight memory access. */
346
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
347
- CPUIOTLBEntry *iotlbentry;
348
+ CPUTLBEntryFull *full;
349
bool need_swap;
350
351
/* For anything that is unaligned, recurse through full_load. */
352
@@ -XXX,XX +XXX,XX @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
353
goto do_unaligned_access;
354
}
355
356
- iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
357
+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
358
359
/* Handle watchpoints. */
360
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
361
/* On watchpoint hit, this will longjmp out. */
362
cpu_check_watchpoint(env_cpu(env), addr, size,
363
- iotlbentry->attrs, BP_MEM_READ, retaddr);
364
+ full->attrs, BP_MEM_READ, retaddr);
365
}
366
367
need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
368
369
/* Handle I/O access. */
370
if (likely(tlb_addr & TLB_MMIO)) {
371
- return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
372
+ return io_readx(env, full, mmu_idx, addr, retaddr,
373
access_type, op ^ (need_swap * MO_BSWAP));
374
}
375
376
@@ -XXX,XX +XXX,XX @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
377
*/
378
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
379
cpu_check_watchpoint(env_cpu(env), addr, size - size2,
380
- env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
381
+ env_tlb(env)->d[mmu_idx].fulltlb[index].attrs,
382
BP_MEM_WRITE, retaddr);
383
}
384
if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
385
cpu_check_watchpoint(env_cpu(env), page2, size2,
386
- env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
387
+ env_tlb(env)->d[mmu_idx].fulltlb[index2].attrs,
388
BP_MEM_WRITE, retaddr);
389
}
390
391
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
392
393
/* Handle anything that isn't just a straight memory access. */
394
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
395
- CPUIOTLBEntry *iotlbentry;
396
+ CPUTLBEntryFull *full;
397
bool need_swap;
398
399
/* For anything that is unaligned, recurse through byte stores. */
400
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
401
goto do_unaligned_access;
402
}
403
404
- iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
405
+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
406
407
/* Handle watchpoints. */
408
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
409
/* On watchpoint hit, this will longjmp out. */
410
cpu_check_watchpoint(env_cpu(env), addr, size,
411
- iotlbentry->attrs, BP_MEM_WRITE, retaddr);
412
+ full->attrs, BP_MEM_WRITE, retaddr);
413
}
414
415
need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
416
417
/* Handle I/O access. */
418
if (tlb_addr & TLB_MMIO) {
419
- io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
420
+ io_writex(env, full, mmu_idx, val, addr, retaddr,
421
op ^ (need_swap * MO_BSWAP));
422
return;
423
}
424
@@ -XXX,XX +XXX,XX @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
425
426
/* Handle clean RAM pages. */
427
if (tlb_addr & TLB_NOTDIRTY) {
428
- notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
429
+ notdirty_write(env_cpu(env), addr, size, full, retaddr);
430
}
431
432
haddr = (void *)((uintptr_t)addr + entry->addend);
433
diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
434
index XXXXXXX..XXXXXXX 100644
435
--- a/target/arm/mte_helper.c
436
+++ b/target/arm/mte_helper.c
437
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
438
return tags + index;
439
#else
440
uintptr_t index;
441
- CPUIOTLBEntry *iotlbentry;
442
+ CPUTLBEntryFull *full;
443
int in_page, flags;
444
ram_addr_t ptr_ra;
445
hwaddr ptr_paddr, tag_paddr, xlat;
446
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
447
assert(!(flags & TLB_INVALID_MASK));
448
449
/*
450
- * Find the iotlbentry for ptr. This *must* be present in the TLB
451
+ * Find the CPUTLBEntryFull for ptr. This *must* be present in the TLB
452
* because we just found the mapping.
453
* TODO: Perhaps there should be a cputlb helper that returns a
454
* matching tlb entry + iotlb entry.
455
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
456
g_assert(tlb_hit(comparator, ptr));
457
}
458
# endif
459
- iotlbentry = &env_tlb(env)->d[ptr_mmu_idx].iotlb[index];
460
+ full = &env_tlb(env)->d[ptr_mmu_idx].fulltlb[index];
461
462
/* If the virtual page MemAttr != Tagged, access unchecked. */
463
- if (!arm_tlb_mte_tagged(&iotlbentry->attrs)) {
464
+ if (!arm_tlb_mte_tagged(&full->attrs)) {
465
return NULL;
466
}
467
468
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
469
int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
470
assert(ra != 0);
471
cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
472
- iotlbentry->attrs, wp, ra);
473
+ full->attrs, wp, ra);
474
}
475
476
/*
477
@@ -XXX,XX +XXX,XX @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
478
tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);
479
480
/* Look up the address in tag space. */
481
- tag_asi = iotlbentry->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
482
+ tag_asi = full->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
483
tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
484
mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
485
tag_access == MMU_DATA_STORE,
486
- iotlbentry->attrs);
487
+ full->attrs);
488
489
/*
490
* Note that @mr will never be NULL. If there is nothing in the address
491
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
492
index XXXXXXX..XXXXXXX 100644
493
--- a/target/arm/sve_helper.c
494
+++ b/target/arm/sve_helper.c
495
@@ -XXX,XX +XXX,XX @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
496
g_assert(tlb_hit(comparator, addr));
497
# endif
498
499
- CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
500
- info->attrs = iotlbentry->attrs;
501
+ CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
502
+ info->attrs = full->attrs;
503
}
504
#endif
505
506
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
507
index XXXXXXX..XXXXXXX 100644
508
--- a/target/arm/translate-a64.c
509
+++ b/target/arm/translate-a64.c
510
@@ -XXX,XX +XXX,XX @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s)
511
* table entry even for that case.
512
*/
513
return (tlb_hit(entry->addr_code, addr) &&
514
- arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs));
515
+ arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].fulltlb[index].attrs));
516
#endif
517
}
185
}
518
186
519
--
187
--
520
2.34.1
188
2.43.0
521
189
522
190
diff view generated by jsdifflib
New patch
1
Test each vector type, not just lumping them all together.
2
Add tests for I32 (always true) and I64 (64-bit hosts).
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/tcg.c | 66 ++++++++++++++++++++++++++++++++++++-------------------
8
1 file changed, 43 insertions(+), 23 deletions(-)
9
10
diff --git a/tcg/tcg.c b/tcg/tcg.c
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/tcg.c
13
+++ b/tcg/tcg.c
14
@@ -XXX,XX +XXX,XX @@ TCGTemp *tcgv_i32_temp(TCGv_i32 v)
15
*/
16
bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
17
{
18
- const bool have_vec
19
- = TCG_TARGET_HAS_v64 | TCG_TARGET_HAS_v128 | TCG_TARGET_HAS_v256;
20
+ bool has_type;
21
+
22
+ switch (type) {
23
+ case TCG_TYPE_I32:
24
+ has_type = true;
25
+ break;
26
+ case TCG_TYPE_I64:
27
+ has_type = TCG_TARGET_REG_BITS == 64;
28
+ break;
29
+ case TCG_TYPE_V64:
30
+ has_type = TCG_TARGET_HAS_v64;
31
+ break;
32
+ case TCG_TYPE_V128:
33
+ has_type = TCG_TARGET_HAS_v128;
34
+ break;
35
+ case TCG_TYPE_V256:
36
+ has_type = TCG_TARGET_HAS_v256;
37
+ break;
38
+ default:
39
+ has_type = false;
40
+ break;
41
+ }
42
43
switch (op) {
44
case INDEX_op_discard:
45
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
46
case INDEX_op_or_vec:
47
case INDEX_op_xor_vec:
48
case INDEX_op_cmp_vec:
49
- return have_vec;
50
+ return has_type;
51
case INDEX_op_dup2_vec:
52
- return have_vec && TCG_TARGET_REG_BITS == 32;
53
+ return has_type && TCG_TARGET_REG_BITS == 32;
54
case INDEX_op_not_vec:
55
- return have_vec && TCG_TARGET_HAS_not_vec;
56
+ return has_type && TCG_TARGET_HAS_not_vec;
57
case INDEX_op_neg_vec:
58
- return have_vec && TCG_TARGET_HAS_neg_vec;
59
+ return has_type && TCG_TARGET_HAS_neg_vec;
60
case INDEX_op_abs_vec:
61
- return have_vec && TCG_TARGET_HAS_abs_vec;
62
+ return has_type && TCG_TARGET_HAS_abs_vec;
63
case INDEX_op_andc_vec:
64
- return have_vec && TCG_TARGET_HAS_andc_vec;
65
+ return has_type && TCG_TARGET_HAS_andc_vec;
66
case INDEX_op_orc_vec:
67
- return have_vec && TCG_TARGET_HAS_orc_vec;
68
+ return has_type && TCG_TARGET_HAS_orc_vec;
69
case INDEX_op_nand_vec:
70
- return have_vec && TCG_TARGET_HAS_nand_vec;
71
+ return has_type && TCG_TARGET_HAS_nand_vec;
72
case INDEX_op_nor_vec:
73
- return have_vec && TCG_TARGET_HAS_nor_vec;
74
+ return has_type && TCG_TARGET_HAS_nor_vec;
75
case INDEX_op_eqv_vec:
76
- return have_vec && TCG_TARGET_HAS_eqv_vec;
77
+ return has_type && TCG_TARGET_HAS_eqv_vec;
78
case INDEX_op_mul_vec:
79
- return have_vec && TCG_TARGET_HAS_mul_vec;
80
+ return has_type && TCG_TARGET_HAS_mul_vec;
81
case INDEX_op_shli_vec:
82
case INDEX_op_shri_vec:
83
case INDEX_op_sari_vec:
84
- return have_vec && TCG_TARGET_HAS_shi_vec;
85
+ return has_type && TCG_TARGET_HAS_shi_vec;
86
case INDEX_op_shls_vec:
87
case INDEX_op_shrs_vec:
88
case INDEX_op_sars_vec:
89
- return have_vec && TCG_TARGET_HAS_shs_vec;
90
+ return has_type && TCG_TARGET_HAS_shs_vec;
91
case INDEX_op_shlv_vec:
92
case INDEX_op_shrv_vec:
93
case INDEX_op_sarv_vec:
94
- return have_vec && TCG_TARGET_HAS_shv_vec;
95
+ return has_type && TCG_TARGET_HAS_shv_vec;
96
case INDEX_op_rotli_vec:
97
- return have_vec && TCG_TARGET_HAS_roti_vec;
98
+ return has_type && TCG_TARGET_HAS_roti_vec;
99
case INDEX_op_rotls_vec:
100
- return have_vec && TCG_TARGET_HAS_rots_vec;
101
+ return has_type && TCG_TARGET_HAS_rots_vec;
102
case INDEX_op_rotlv_vec:
103
case INDEX_op_rotrv_vec:
104
- return have_vec && TCG_TARGET_HAS_rotv_vec;
105
+ return has_type && TCG_TARGET_HAS_rotv_vec;
106
case INDEX_op_ssadd_vec:
107
case INDEX_op_usadd_vec:
108
case INDEX_op_sssub_vec:
109
case INDEX_op_ussub_vec:
110
- return have_vec && TCG_TARGET_HAS_sat_vec;
111
+ return has_type && TCG_TARGET_HAS_sat_vec;
112
case INDEX_op_smin_vec:
113
case INDEX_op_umin_vec:
114
case INDEX_op_smax_vec:
115
case INDEX_op_umax_vec:
116
- return have_vec && TCG_TARGET_HAS_minmax_vec;
117
+ return has_type && TCG_TARGET_HAS_minmax_vec;
118
case INDEX_op_bitsel_vec:
119
- return have_vec && TCG_TARGET_HAS_bitsel_vec;
120
+ return has_type && TCG_TARGET_HAS_bitsel_vec;
121
case INDEX_op_cmpsel_vec:
122
- return have_vec && TCG_TARGET_HAS_cmpsel_vec;
123
+ return has_type && TCG_TARGET_HAS_cmpsel_vec;
124
125
default:
126
tcg_debug_assert(op > INDEX_op_last_generic && op < NB_OPS);
127
--
128
2.43.0
129
130
diff view generated by jsdifflib
1
This function has two users, who use it incompatibly.
1
Process each TCGConstraintSetIndex first. Allocate TCGArgConstraint
2
In tlb_flush_page_by_mmuidx_async_0, when flushing a
2
arrays based on those. Only afterward process the TCGOpcodes and
3
single page, we need to flush exactly two pages.
3
share those TCGArgConstraint arrays.
4
In tlb_flush_range_by_mmuidx_async_0, when flushing a
5
range of pages, we need to flush N+1 pages.
6
4
7
This avoids double-flushing of jmp cache pages in a range.
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
9
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
7
---
12
accel/tcg/cputlb.c | 25 ++++++++++++++-----------
8
include/tcg/tcg.h | 7 +-
13
1 file changed, 14 insertions(+), 11 deletions(-)
9
tcg/tcg.c | 272 +++++++++++++++++++++++-----------------------
10
2 files changed, 136 insertions(+), 143 deletions(-)
14
11
15
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
12
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
16
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/tcg/cputlb.c
14
--- a/include/tcg/tcg.h
18
+++ b/accel/tcg/cputlb.c
15
+++ b/include/tcg/tcg.h
19
@@ -XXX,XX +XXX,XX @@ static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
16
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOpDef {
17
const char *name;
18
uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
19
uint8_t flags;
20
- TCGArgConstraint *args_ct;
21
+ const TCGArgConstraint *args_ct;
22
} TCGOpDef;
23
24
extern TCGOpDef tcg_op_defs[];
25
extern const size_t tcg_op_defs_max;
26
27
-typedef struct TCGTargetOpDef {
28
- TCGOpcode op;
29
- const char *args_ct_str[TCG_MAX_OP_ARGS];
30
-} TCGTargetOpDef;
31
-
32
/*
33
* tcg_op_supported:
34
* Query if @op, for @type and @flags, is supported by the host
35
diff --git a/tcg/tcg.c b/tcg/tcg.c
36
index XXXXXXX..XXXXXXX 100644
37
--- a/tcg/tcg.c
38
+++ b/tcg/tcg.c
39
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);
40
41
/* Put all of the constraint sets into an array, indexed by the enum. */
42
43
-#define C_O0_I1(I1) { .args_ct_str = { #I1 } },
44
-#define C_O0_I2(I1, I2) { .args_ct_str = { #I1, #I2 } },
45
-#define C_O0_I3(I1, I2, I3) { .args_ct_str = { #I1, #I2, #I3 } },
46
-#define C_O0_I4(I1, I2, I3, I4) { .args_ct_str = { #I1, #I2, #I3, #I4 } },
47
+typedef struct TCGConstraintSet {
48
+ uint8_t nb_oargs, nb_iargs;
49
+ const char *args_ct_str[TCG_MAX_OP_ARGS];
50
+} TCGConstraintSet;
51
52
-#define C_O1_I1(O1, I1) { .args_ct_str = { #O1, #I1 } },
53
-#define C_O1_I2(O1, I1, I2) { .args_ct_str = { #O1, #I1, #I2 } },
54
-#define C_O1_I3(O1, I1, I2, I3) { .args_ct_str = { #O1, #I1, #I2, #I3 } },
55
-#define C_O1_I4(O1, I1, I2, I3, I4) { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } },
56
+#define C_O0_I1(I1) { 0, 1, { #I1 } },
57
+#define C_O0_I2(I1, I2) { 0, 2, { #I1, #I2 } },
58
+#define C_O0_I3(I1, I2, I3) { 0, 3, { #I1, #I2, #I3 } },
59
+#define C_O0_I4(I1, I2, I3, I4) { 0, 4, { #I1, #I2, #I3, #I4 } },
60
61
-#define C_N1_I2(O1, I1, I2) { .args_ct_str = { "&" #O1, #I1, #I2 } },
62
-#define C_N1O1_I1(O1, O2, I1) { .args_ct_str = { "&" #O1, #O2, #I1 } },
63
-#define C_N2_I1(O1, O2, I1) { .args_ct_str = { "&" #O1, "&" #O2, #I1 } },
64
+#define C_O1_I1(O1, I1) { 1, 1, { #O1, #I1 } },
65
+#define C_O1_I2(O1, I1, I2) { 1, 2, { #O1, #I1, #I2 } },
66
+#define C_O1_I3(O1, I1, I2, I3) { 1, 3, { #O1, #I1, #I2, #I3 } },
67
+#define C_O1_I4(O1, I1, I2, I3, I4) { 1, 4, { #O1, #I1, #I2, #I3, #I4 } },
68
69
-#define C_O2_I1(O1, O2, I1) { .args_ct_str = { #O1, #O2, #I1 } },
70
-#define C_O2_I2(O1, O2, I1, I2) { .args_ct_str = { #O1, #O2, #I1, #I2 } },
71
-#define C_O2_I3(O1, O2, I1, I2, I3) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3 } },
72
-#define C_O2_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { #O1, #O2, #I1, #I2, #I3, #I4 } },
73
-#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) { .args_ct_str = { "&" #O1, #O2, #I1, #I2, #I3, #I4 } },
74
+#define C_N1_I2(O1, I1, I2) { 1, 2, { "&" #O1, #I1, #I2 } },
75
+#define C_N1O1_I1(O1, O2, I1) { 2, 1, { "&" #O1, #O2, #I1 } },
76
+#define C_N2_I1(O1, O2, I1) { 2, 1, { "&" #O1, "&" #O2, #I1 } },
77
78
-static const TCGTargetOpDef constraint_sets[] = {
79
+#define C_O2_I1(O1, O2, I1) { 2, 1, { #O1, #O2, #I1 } },
80
+#define C_O2_I2(O1, O2, I1, I2) { 2, 2, { #O1, #O2, #I1, #I2 } },
81
+#define C_O2_I3(O1, O2, I1, I2, I3) { 2, 3, { #O1, #O2, #I1, #I2, #I3 } },
82
+#define C_O2_I4(O1, O2, I1, I2, I3, I4) { 2, 4, { #O1, #O2, #I1, #I2, #I3, #I4 } },
83
+#define C_N1_O1_I4(O1, O2, I1, I2, I3, I4) { 2, 4, { "&" #O1, #O2, #I1, #I2, #I3, #I4 } },
84
+
85
+static const TCGConstraintSet constraint_sets[] = {
86
#include "tcg-target-con-set.h"
87
};
88
89
-
90
#undef C_O0_I1
91
#undef C_O0_I2
92
#undef C_O0_I3
93
@@ -XXX,XX +XXX,XX @@ static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
94
static void tcg_context_init(unsigned max_cpus)
95
{
96
TCGContext *s = &tcg_init_ctx;
97
- int op, total_args, n, i;
98
- TCGOpDef *def;
99
- TCGArgConstraint *args_ct;
100
+ int n, i;
101
TCGTemp *ts;
102
103
memset(s, 0, sizeof(*s));
104
s->nb_globals = 0;
105
106
- /* Count total number of arguments and allocate the corresponding
107
- space */
108
- total_args = 0;
109
- for(op = 0; op < NB_OPS; op++) {
110
- def = &tcg_op_defs[op];
111
- n = def->nb_iargs + def->nb_oargs;
112
- total_args += n;
113
- }
114
-
115
- args_ct = g_new0(TCGArgConstraint, total_args);
116
-
117
- for(op = 0; op < NB_OPS; op++) {
118
- def = &tcg_op_defs[op];
119
- def->args_ct = args_ct;
120
- n = def->nb_iargs + def->nb_oargs;
121
- args_ct += n;
122
- }
123
-
124
init_call_layout(&info_helper_ld32_mmu);
125
init_call_layout(&info_helper_ld64_mmu);
126
init_call_layout(&info_helper_ld128_mmu);
127
@@ -XXX,XX +XXX,XX @@ void tcg_dump_ops(TCGContext *s, FILE *f, bool have_prefs)
128
}
129
130
/* we give more priority to constraints with less registers */
131
-static int get_constraint_priority(const TCGOpDef *def, int k)
132
+static int get_constraint_priority(const TCGArgConstraint *arg_ct, int k)
133
{
134
- const TCGArgConstraint *arg_ct = &def->args_ct[k];
135
- int n = ctpop64(arg_ct->regs);
136
+ int n;
137
+
138
+ arg_ct += k;
139
+ n = ctpop64(arg_ct->regs);
140
141
/*
142
* Sort constraints of a single register first, which includes output
143
@@ -XXX,XX +XXX,XX @@ static int get_constraint_priority(const TCGOpDef *def, int k)
144
}
145
146
/* sort from highest priority to lowest */
147
-static void sort_constraints(TCGOpDef *def, int start, int n)
148
+static void sort_constraints(TCGArgConstraint *a, int start, int n)
149
{
150
int i, j;
151
- TCGArgConstraint *a = def->args_ct;
152
153
for (i = 0; i < n; i++) {
154
a[start + i].sort_index = start + i;
155
@@ -XXX,XX +XXX,XX @@ static void sort_constraints(TCGOpDef *def, int start, int n)
156
}
157
for (i = 0; i < n - 1; i++) {
158
for (j = i + 1; j < n; j++) {
159
- int p1 = get_constraint_priority(def, a[start + i].sort_index);
160
- int p2 = get_constraint_priority(def, a[start + j].sort_index);
161
+ int p1 = get_constraint_priority(a, a[start + i].sort_index);
162
+ int p2 = get_constraint_priority(a, a[start + j].sort_index);
163
if (p1 < p2) {
164
int tmp = a[start + i].sort_index;
165
a[start + i].sort_index = a[start + j].sort_index;
166
@@ -XXX,XX +XXX,XX @@ static void sort_constraints(TCGOpDef *def, int start, int n)
20
}
167
}
21
}
168
}
22
169
23
-static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
170
+static const TCGArgConstraint empty_cts[TCG_MAX_OP_ARGS];
24
-{
171
+static TCGArgConstraint all_cts[ARRAY_SIZE(constraint_sets)][TCG_MAX_OP_ARGS];
25
- /* Discard jump cache entries for any tb which might potentially
172
+
26
- overlap the flushed page. */
173
static void process_op_defs(TCGContext *s)
27
- tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
174
{
28
- tb_jmp_cache_clear_page(cpu, addr);
175
- TCGOpcode op;
29
-}
176
-
30
-
177
- for (op = 0; op < NB_OPS; op++) {
31
/**
178
- TCGOpDef *def = &tcg_op_defs[op];
32
* tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
179
- const TCGTargetOpDef *tdefs;
33
* @desc: The CPUTLBDesc portion of the TLB
180
+ for (size_t c = 0; c < ARRAY_SIZE(constraint_sets); ++c) {
34
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
181
+ const TCGConstraintSet *tdefs = &constraint_sets[c];
35
}
182
+ TCGArgConstraint *args_ct = all_cts[c];
36
qemu_spin_unlock(&env_tlb(env)->c.lock);
183
+ int nb_oargs = tdefs->nb_oargs;
37
184
+ int nb_iargs = tdefs->nb_iargs;
38
- tb_flush_jmp_cache(cpu, addr);
185
+ int nb_args = nb_oargs + nb_iargs;
39
+ /*
186
bool saw_alias_pair = false;
40
+ * Discard jump cache entries for any tb which might potentially
187
- int i, o, i2, o2, nb_args;
41
+ * overlap the flushed page, which includes the previous.
188
- TCGConstraintSetIndex con_set;
42
+ */
189
43
+ tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
190
- if (def->flags & TCG_OPF_NOT_PRESENT) {
44
+ tb_jmp_cache_clear_page(cpu, addr);
191
- continue;
45
}
192
- }
46
193
-
47
/**
194
- nb_args = def->nb_iargs + def->nb_oargs;
48
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
195
- if (nb_args == 0) {
49
return;
196
- continue;
50
}
197
- }
51
198
-
52
- for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
199
- /*
53
- tb_flush_jmp_cache(cpu, d.addr + i);
200
- * Macro magic should make it impossible, but double-check that
54
+ /*
201
- * the array index is in range. At the same time, double-check
55
+ * Discard jump cache entries for any tb which might potentially
202
- * that the opcode is implemented, i.e. not C_NotImplemented.
56
+ * overlap the flushed pages, which includes the previous.
203
- */
57
+ */
204
- con_set = tcg_target_op_def(op);
58
+ d.addr -= TARGET_PAGE_SIZE;
205
- tcg_debug_assert(con_set >= 0 && con_set < ARRAY_SIZE(constraint_sets));
59
+ for (target_ulong i = 0, n = d.len / TARGET_PAGE_SIZE + 1; i < n; i++) {
206
- tdefs = &constraint_sets[con_set];
60
+ tb_jmp_cache_clear_page(cpu, d.addr);
207
-
61
+ d.addr += TARGET_PAGE_SIZE;
208
- for (i = 0; i < nb_args; i++) {
209
+ for (int i = 0; i < nb_args; i++) {
210
const char *ct_str = tdefs->args_ct_str[i];
211
- bool input_p = i >= def->nb_oargs;
212
-
213
- /* Incomplete TCGTargetOpDef entry. */
214
- tcg_debug_assert(ct_str != NULL);
215
+ bool input_p = i >= nb_oargs;
216
+ int o;
217
218
switch (*ct_str) {
219
case '0' ... '9':
220
o = *ct_str - '0';
221
tcg_debug_assert(input_p);
222
- tcg_debug_assert(o < def->nb_oargs);
223
- tcg_debug_assert(def->args_ct[o].regs != 0);
224
- tcg_debug_assert(!def->args_ct[o].oalias);
225
- def->args_ct[i] = def->args_ct[o];
226
+ tcg_debug_assert(o < nb_oargs);
227
+ tcg_debug_assert(args_ct[o].regs != 0);
228
+ tcg_debug_assert(!args_ct[o].oalias);
229
+ args_ct[i] = args_ct[o];
230
/* The output sets oalias. */
231
- def->args_ct[o].oalias = 1;
232
- def->args_ct[o].alias_index = i;
233
+ args_ct[o].oalias = 1;
234
+ args_ct[o].alias_index = i;
235
/* The input sets ialias. */
236
- def->args_ct[i].ialias = 1;
237
- def->args_ct[i].alias_index = o;
238
- if (def->args_ct[i].pair) {
239
+ args_ct[i].ialias = 1;
240
+ args_ct[i].alias_index = o;
241
+ if (args_ct[i].pair) {
242
saw_alias_pair = true;
243
}
244
tcg_debug_assert(ct_str[1] == '\0');
245
@@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s)
246
247
case '&':
248
tcg_debug_assert(!input_p);
249
- def->args_ct[i].newreg = true;
250
+ args_ct[i].newreg = true;
251
ct_str++;
252
break;
253
254
case 'p': /* plus */
255
/* Allocate to the register after the previous. */
256
- tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
257
+ tcg_debug_assert(i > (input_p ? nb_oargs : 0));
258
o = i - 1;
259
- tcg_debug_assert(!def->args_ct[o].pair);
260
- tcg_debug_assert(!def->args_ct[o].ct);
261
- def->args_ct[i] = (TCGArgConstraint){
262
+ tcg_debug_assert(!args_ct[o].pair);
263
+ tcg_debug_assert(!args_ct[o].ct);
264
+ args_ct[i] = (TCGArgConstraint){
265
.pair = 2,
266
.pair_index = o,
267
- .regs = def->args_ct[o].regs << 1,
268
- .newreg = def->args_ct[o].newreg,
269
+ .regs = args_ct[o].regs << 1,
270
+ .newreg = args_ct[o].newreg,
271
};
272
- def->args_ct[o].pair = 1;
273
- def->args_ct[o].pair_index = i;
274
+ args_ct[o].pair = 1;
275
+ args_ct[o].pair_index = i;
276
tcg_debug_assert(ct_str[1] == '\0');
277
continue;
278
279
case 'm': /* minus */
280
/* Allocate to the register before the previous. */
281
- tcg_debug_assert(i > (input_p ? def->nb_oargs : 0));
282
+ tcg_debug_assert(i > (input_p ? nb_oargs : 0));
283
o = i - 1;
284
- tcg_debug_assert(!def->args_ct[o].pair);
285
- tcg_debug_assert(!def->args_ct[o].ct);
286
- def->args_ct[i] = (TCGArgConstraint){
287
+ tcg_debug_assert(!args_ct[o].pair);
288
+ tcg_debug_assert(!args_ct[o].ct);
289
+ args_ct[i] = (TCGArgConstraint){
290
.pair = 1,
291
.pair_index = o,
292
- .regs = def->args_ct[o].regs >> 1,
293
- .newreg = def->args_ct[o].newreg,
294
+ .regs = args_ct[o].regs >> 1,
295
+ .newreg = args_ct[o].newreg,
296
};
297
- def->args_ct[o].pair = 2;
298
- def->args_ct[o].pair_index = i;
299
+ args_ct[o].pair = 2;
300
+ args_ct[o].pair_index = i;
301
tcg_debug_assert(ct_str[1] == '\0');
302
continue;
303
}
304
@@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s)
305
do {
306
switch (*ct_str) {
307
case 'i':
308
- def->args_ct[i].ct |= TCG_CT_CONST;
309
+ args_ct[i].ct |= TCG_CT_CONST;
310
break;
311
312
/* Include all of the target-specific constraints. */
313
314
#undef CONST
315
#define CONST(CASE, MASK) \
316
- case CASE: def->args_ct[i].ct |= MASK; break;
317
+ case CASE: args_ct[i].ct |= MASK; break;
318
#define REGS(CASE, MASK) \
319
- case CASE: def->args_ct[i].regs |= MASK; break;
320
+ case CASE: args_ct[i].regs |= MASK; break;
321
322
#include "tcg-target-con-str.h"
323
324
@@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s)
325
case '&':
326
case 'p':
327
case 'm':
328
- /* Typo in TCGTargetOpDef constraint. */
329
+ /* Typo in TCGConstraintSet constraint. */
330
g_assert_not_reached();
331
}
332
} while (*++ct_str != '\0');
333
}
334
335
- /* TCGTargetOpDef entry with too much information? */
336
- tcg_debug_assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
337
-
338
/*
339
* Fix up output pairs that are aliased with inputs.
340
* When we created the alias, we copied pair from the output.
341
@@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s)
342
* first output to pair=3, and the pair_index'es to match.
343
*/
344
if (saw_alias_pair) {
345
- for (i = def->nb_oargs; i < nb_args; i++) {
346
+ for (int i = nb_oargs; i < nb_args; i++) {
347
+ int o, o2, i2;
348
+
349
/*
350
* Since [0-9pm] must be alone in the constraint string,
351
* the only way they can both be set is if the pair comes
352
* from the output alias.
353
*/
354
- if (!def->args_ct[i].ialias) {
355
+ if (!args_ct[i].ialias) {
356
continue;
357
}
358
- switch (def->args_ct[i].pair) {
359
+ switch (args_ct[i].pair) {
360
case 0:
361
break;
362
case 1:
363
- o = def->args_ct[i].alias_index;
364
- o2 = def->args_ct[o].pair_index;
365
- tcg_debug_assert(def->args_ct[o].pair == 1);
366
- tcg_debug_assert(def->args_ct[o2].pair == 2);
367
- if (def->args_ct[o2].oalias) {
368
+ o = args_ct[i].alias_index;
369
+ o2 = args_ct[o].pair_index;
370
+ tcg_debug_assert(args_ct[o].pair == 1);
371
+ tcg_debug_assert(args_ct[o2].pair == 2);
372
+ if (args_ct[o2].oalias) {
373
/* Case 1a */
374
- i2 = def->args_ct[o2].alias_index;
375
- tcg_debug_assert(def->args_ct[i2].pair == 2);
376
- def->args_ct[i2].pair_index = i;
377
- def->args_ct[i].pair_index = i2;
378
+ i2 = args_ct[o2].alias_index;
379
+ tcg_debug_assert(args_ct[i2].pair == 2);
380
+ args_ct[i2].pair_index = i;
381
+ args_ct[i].pair_index = i2;
382
} else {
383
/* Case 1b */
384
- def->args_ct[i].pair_index = i;
385
+ args_ct[i].pair_index = i;
386
}
387
break;
388
case 2:
389
- o = def->args_ct[i].alias_index;
390
- o2 = def->args_ct[o].pair_index;
391
- tcg_debug_assert(def->args_ct[o].pair == 2);
392
- tcg_debug_assert(def->args_ct[o2].pair == 1);
393
- if (def->args_ct[o2].oalias) {
394
+ o = args_ct[i].alias_index;
395
+ o2 = args_ct[o].pair_index;
396
+ tcg_debug_assert(args_ct[o].pair == 2);
397
+ tcg_debug_assert(args_ct[o2].pair == 1);
398
+ if (args_ct[o2].oalias) {
399
/* Case 1a */
400
- i2 = def->args_ct[o2].alias_index;
401
- tcg_debug_assert(def->args_ct[i2].pair == 1);
402
- def->args_ct[i2].pair_index = i;
403
- def->args_ct[i].pair_index = i2;
404
+ i2 = args_ct[o2].alias_index;
405
+ tcg_debug_assert(args_ct[i2].pair == 1);
406
+ args_ct[i2].pair_index = i;
407
+ args_ct[i].pair_index = i2;
408
} else {
409
/* Case 2 */
410
- def->args_ct[i].pair = 3;
411
- def->args_ct[o2].pair = 3;
412
- def->args_ct[i].pair_index = o2;
413
- def->args_ct[o2].pair_index = i;
414
+ args_ct[i].pair = 3;
415
+ args_ct[o2].pair = 3;
416
+ args_ct[i].pair_index = o2;
417
+ args_ct[o2].pair_index = i;
418
}
419
break;
420
default:
421
@@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s)
422
}
423
424
/* sort the constraints (XXX: this is just an heuristic) */
425
- sort_constraints(def, 0, def->nb_oargs);
426
- sort_constraints(def, def->nb_oargs, def->nb_iargs);
427
+ sort_constraints(args_ct, 0, nb_oargs);
428
+ sort_constraints(args_ct, nb_oargs, nb_iargs);
429
+ }
430
+
431
+ for (TCGOpcode op = 0; op < NB_OPS; op++) {
432
+ TCGOpDef *def = &tcg_op_defs[op];
433
+ const TCGConstraintSet *tdefs;
434
+ TCGConstraintSetIndex con_set;
435
+ int nb_args;
436
+
437
+ nb_args = def->nb_iargs + def->nb_oargs;
438
+ if (nb_args == 0) {
439
+ continue;
440
+ }
441
+
442
+ if (def->flags & TCG_OPF_NOT_PRESENT) {
443
+ def->args_ct = empty_cts;
444
+ continue;
445
+ }
446
+
447
+ /*
448
+ * Macro magic should make it impossible, but double-check that
449
+ * the array index is in range. At the same time, double-check
450
+ * that the opcode is implemented, i.e. not C_NotImplemented.
451
+ */
452
+ con_set = tcg_target_op_def(op);
453
+ tcg_debug_assert(con_set >= 0 && con_set < ARRAY_SIZE(constraint_sets));
454
+
455
+ /* The constraint arguments must match TCGOpcode arguments. */
456
+ tdefs = &constraint_sets[con_set];
457
+ tcg_debug_assert(tdefs->nb_oargs == def->nb_oargs);
458
+ tcg_debug_assert(tdefs->nb_iargs == def->nb_iargs);
459
+
460
+ def->args_ct = all_cts[con_set];
62
}
461
}
63
}
462
}
64
463
65
--
464
--
66
2.34.1
465
2.43.0
67
466
68
467
diff view generated by jsdifflib
New patch
1
1
Introduce a new function, opcode_args_ct, to look up the argument
2
set for an opcode. We lose the ability to assert the correctness
3
of the map from TCGOpcode to constraint sets at startup, but we can
4
still validate at runtime upon lookup.
5
6
Rename process_op_defs to process_constraint_sets, as it now does
7
nothing to TCGOpDef.
8
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
12
include/tcg/tcg.h | 1 -
13
tcg/tcg-common.c | 2 +-
14
tcg/tcg.c | 82 ++++++++++++++++++++++-------------------------
15
3 files changed, 40 insertions(+), 45 deletions(-)
16
17
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/include/tcg/tcg.h
20
+++ b/include/tcg/tcg.h
21
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOpDef {
22
const char *name;
23
uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
24
uint8_t flags;
25
- const TCGArgConstraint *args_ct;
26
} TCGOpDef;
27
28
extern TCGOpDef tcg_op_defs[];
29
diff --git a/tcg/tcg-common.c b/tcg/tcg-common.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/tcg/tcg-common.c
32
+++ b/tcg/tcg-common.c
33
@@ -XXX,XX +XXX,XX @@
34
35
TCGOpDef tcg_op_defs[] = {
36
#define DEF(s, oargs, iargs, cargs, flags) \
37
- { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags, NULL },
38
+ { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
39
#include "tcg/tcg-opc.h"
40
#undef DEF
41
};
42
diff --git a/tcg/tcg.c b/tcg/tcg.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/tcg/tcg.c
45
+++ b/tcg/tcg.c
46
@@ -XXX,XX +XXX,XX @@ static void init_call_layout(TCGHelperInfo *info)
47
}
48
49
static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
50
-static void process_op_defs(TCGContext *s);
51
+static void process_constraint_sets(void);
52
static TCGTemp *tcg_global_reg_new_internal(TCGContext *s, TCGType type,
53
TCGReg reg, const char *name);
54
55
@@ -XXX,XX +XXX,XX @@ static void tcg_context_init(unsigned max_cpus)
56
init_call_layout(&info_helper_st128_mmu);
57
58
tcg_target_init(s);
59
- process_op_defs(s);
60
+ process_constraint_sets();
61
62
/* Reverse the order of the saved registers, assuming they're all at
63
the start of tcg_target_reg_alloc_order. */
64
@@ -XXX,XX +XXX,XX @@ static void sort_constraints(TCGArgConstraint *a, int start, int n)
65
static const TCGArgConstraint empty_cts[TCG_MAX_OP_ARGS];
66
static TCGArgConstraint all_cts[ARRAY_SIZE(constraint_sets)][TCG_MAX_OP_ARGS];
67
68
-static void process_op_defs(TCGContext *s)
69
+static void process_constraint_sets(void)
70
{
71
for (size_t c = 0; c < ARRAY_SIZE(constraint_sets); ++c) {
72
const TCGConstraintSet *tdefs = &constraint_sets[c];
73
@@ -XXX,XX +XXX,XX @@ static void process_op_defs(TCGContext *s)
74
sort_constraints(args_ct, 0, nb_oargs);
75
sort_constraints(args_ct, nb_oargs, nb_iargs);
76
}
77
+}
78
79
- for (TCGOpcode op = 0; op < NB_OPS; op++) {
80
- TCGOpDef *def = &tcg_op_defs[op];
81
- const TCGConstraintSet *tdefs;
82
- TCGConstraintSetIndex con_set;
83
- int nb_args;
84
+static const TCGArgConstraint *opcode_args_ct(const TCGOp *op)
85
+{
86
+ TCGOpDef *def = &tcg_op_defs[op->opc];
87
+ TCGConstraintSetIndex con_set;
88
89
- nb_args = def->nb_iargs + def->nb_oargs;
90
- if (nb_args == 0) {
91
- continue;
92
- }
93
-
94
- if (def->flags & TCG_OPF_NOT_PRESENT) {
95
- def->args_ct = empty_cts;
96
- continue;
97
- }
98
-
99
- /*
100
- * Macro magic should make it impossible, but double-check that
101
- * the array index is in range. At the same time, double-check
102
- * that the opcode is implemented, i.e. not C_NotImplemented.
103
- */
104
- con_set = tcg_target_op_def(op);
105
- tcg_debug_assert(con_set >= 0 && con_set < ARRAY_SIZE(constraint_sets));
106
-
107
- /* The constraint arguments must match TCGOpcode arguments. */
108
- tdefs = &constraint_sets[con_set];
109
- tcg_debug_assert(tdefs->nb_oargs == def->nb_oargs);
110
- tcg_debug_assert(tdefs->nb_iargs == def->nb_iargs);
111
-
112
- def->args_ct = all_cts[con_set];
113
+ if (def->nb_iargs + def->nb_oargs == 0) {
114
+ return NULL;
115
}
116
+ if (def->flags & TCG_OPF_NOT_PRESENT) {
117
+ return empty_cts;
118
+ }
119
+
120
+ con_set = tcg_target_op_def(op->opc);
121
+ tcg_debug_assert(con_set >= 0 && con_set < ARRAY_SIZE(constraint_sets));
122
+
123
+ /* The constraint arguments must match TCGOpcode arguments. */
124
+ tcg_debug_assert(constraint_sets[con_set].nb_oargs == def->nb_oargs);
125
+ tcg_debug_assert(constraint_sets[con_set].nb_iargs == def->nb_iargs);
126
+
127
+ return all_cts[con_set];
128
}
129
130
static void remove_label_use(TCGOp *op, int idx)
131
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
132
TCGTemp *ts;
133
TCGOpcode opc = op->opc;
134
const TCGOpDef *def = &tcg_op_defs[opc];
135
+ const TCGArgConstraint *args_ct;
136
137
switch (opc) {
138
case INDEX_op_call:
139
@@ -XXX,XX +XXX,XX @@ liveness_pass_1(TCGContext *s)
140
break;
141
142
default:
143
+ args_ct = opcode_args_ct(op);
144
for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
145
- const TCGArgConstraint *ct = &def->args_ct[i];
146
+ const TCGArgConstraint *ct = &args_ct[i];
147
TCGRegSet set, *pset;
148
149
ts = arg_temp(op->args[i]);
150
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
151
{
152
const TCGLifeData arg_life = op->life;
153
TCGRegSet dup_out_regs, dup_in_regs;
154
+ const TCGArgConstraint *dup_args_ct;
155
TCGTemp *its, *ots;
156
TCGType itype, vtype;
157
unsigned vece;
158
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_dup(TCGContext *s, const TCGOp *op)
159
return;
160
}
161
162
- dup_out_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
163
- dup_in_regs = tcg_op_defs[INDEX_op_dup_vec].args_ct[1].regs;
164
+ dup_args_ct = opcode_args_ct(op);
165
+ dup_out_regs = dup_args_ct[0].regs;
166
+ dup_in_regs = dup_args_ct[1].regs;
167
168
/* Allocate the output register now. */
169
if (ots->val_type != TEMP_VAL_REG) {
170
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
171
int i, k, nb_iargs, nb_oargs;
172
TCGReg reg;
173
TCGArg arg;
174
+ const TCGArgConstraint *args_ct;
175
const TCGArgConstraint *arg_ct;
176
TCGTemp *ts;
177
TCGArg new_args[TCG_MAX_OP_ARGS];
178
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
179
break;
180
}
181
182
+ args_ct = opcode_args_ct(op);
183
+
184
/* satisfy input constraints */
185
for (k = 0; k < nb_iargs; k++) {
186
TCGRegSet i_preferred_regs, i_required_regs;
187
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
188
TCGTemp *ts2;
189
int i1, i2;
190
191
- i = def->args_ct[nb_oargs + k].sort_index;
192
+ i = args_ct[nb_oargs + k].sort_index;
193
arg = op->args[i];
194
- arg_ct = &def->args_ct[i];
195
+ arg_ct = &args_ct[i];
196
ts = arg_temp(arg);
197
198
if (ts->val_type == TEMP_VAL_CONST
199
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
200
* register and move it.
201
*/
202
if (temp_readonly(ts) || !IS_DEAD_ARG(i)
203
- || def->args_ct[arg_ct->alias_index].newreg) {
204
+ || args_ct[arg_ct->alias_index].newreg) {
205
allocate_new_reg = true;
206
} else if (ts->val_type == TEMP_VAL_REG) {
207
/*
208
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
209
}
210
211
/* satisfy the output constraints */
212
- for(k = 0; k < nb_oargs; k++) {
213
- i = def->args_ct[k].sort_index;
214
+ for (k = 0; k < nb_oargs; k++) {
215
+ i = args_ct[k].sort_index;
216
arg = op->args[i];
217
- arg_ct = &def->args_ct[i];
218
+ arg_ct = &args_ct[i];
219
ts = arg_temp(arg);
220
221
/* ENV should not be modified. */
222
@@ -XXX,XX +XXX,XX @@ static bool tcg_reg_alloc_dup2(TCGContext *s, const TCGOp *op)
223
/* Allocate the output register now. */
224
if (ots->val_type != TEMP_VAL_REG) {
225
TCGRegSet allocated_regs = s->reserved_regs;
226
- TCGRegSet dup_out_regs =
227
- tcg_op_defs[INDEX_op_dup_vec].args_ct[0].regs;
228
+ TCGRegSet dup_out_regs = opcode_args_ct(op)[0].regs;
229
TCGReg oreg;
230
231
/* Make sure to not spill the input registers. */
232
--
233
2.43.0
234
235
diff view generated by jsdifflib
New patch
1
Now that we're no longer assigning to TCGOpDef.args_ct,
2
we can make the array constant.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
include/tcg/tcg.h | 2 +-
8
tcg/tcg-common.c | 2 +-
9
tcg/tcg.c | 2 +-
10
3 files changed, 3 insertions(+), 3 deletions(-)
11
12
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/include/tcg/tcg.h
15
+++ b/include/tcg/tcg.h
16
@@ -XXX,XX +XXX,XX @@ typedef struct TCGOpDef {
17
uint8_t flags;
18
} TCGOpDef;
19
20
-extern TCGOpDef tcg_op_defs[];
21
+extern const TCGOpDef tcg_op_defs[];
22
extern const size_t tcg_op_defs_max;
23
24
/*
25
diff --git a/tcg/tcg-common.c b/tcg/tcg-common.c
26
index XXXXXXX..XXXXXXX 100644
27
--- a/tcg/tcg-common.c
28
+++ b/tcg/tcg-common.c
29
@@ -XXX,XX +XXX,XX @@
30
#include "tcg/tcg.h"
31
#include "tcg-has.h"
32
33
-TCGOpDef tcg_op_defs[] = {
34
+const TCGOpDef tcg_op_defs[] = {
35
#define DEF(s, oargs, iargs, cargs, flags) \
36
{ #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
37
#include "tcg/tcg-opc.h"
38
diff --git a/tcg/tcg.c b/tcg/tcg.c
39
index XXXXXXX..XXXXXXX 100644
40
--- a/tcg/tcg.c
41
+++ b/tcg/tcg.c
42
@@ -XXX,XX +XXX,XX @@ static void process_constraint_sets(void)
43
44
static const TCGArgConstraint *opcode_args_ct(const TCGOp *op)
45
{
46
- TCGOpDef *def = &tcg_op_defs[op->opc];
47
+ const TCGOpDef *def = &tcg_op_defs[op->opc];
48
TCGConstraintSetIndex con_set;
49
50
if (def->nb_iargs + def->nb_oargs == 0) {
51
--
52
2.43.0
53
54
diff view generated by jsdifflib
1
Add an interface to return the CPUTLBEntryFull struct
1
We should have checked that the op is supported before
2
that goes with the lookup. The result is not intended
2
emitting it. The backend cannot be expected to have a
3
to be valid across multiple lookups, so the user must
3
constraint set for unsupported ops.
4
use the results immediately.
5
4
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
8
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
7
---
11
include/exec/exec-all.h | 15 +++++++++++++
8
tcg/tcg.c | 4 ++++
12
include/qemu/typedefs.h | 1 +
9
1 file changed, 4 insertions(+)
13
accel/tcg/cputlb.c | 47 +++++++++++++++++++++++++----------------
14
3 files changed, 45 insertions(+), 18 deletions(-)
15
10
16
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
11
diff --git a/tcg/tcg.c b/tcg/tcg.c
17
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
18
--- a/include/exec/exec-all.h
13
--- a/tcg/tcg.c
19
+++ b/include/exec/exec-all.h
14
+++ b/tcg/tcg.c
20
@@ -XXX,XX +XXX,XX @@ int probe_access_flags(CPUArchState *env, target_ulong addr,
15
@@ -XXX,XX +XXX,XX @@ static const TCGArgConstraint *opcode_args_ct(const TCGOp *op)
21
MMUAccessType access_type, int mmu_idx,
16
const TCGOpDef *def = &tcg_op_defs[op->opc];
22
bool nonfault, void **phost, uintptr_t retaddr);
17
TCGConstraintSetIndex con_set;
23
18
24
+#ifndef CONFIG_USER_ONLY
19
+#ifdef CONFIG_DEBUG_TCG
25
+/**
20
+ assert(tcg_op_supported(op->opc, TCGOP_TYPE(op), TCGOP_FLAGS(op)));
26
+ * probe_access_full:
27
+ * Like probe_access_flags, except also return into @pfull.
28
+ *
29
+ * The CPUTLBEntryFull structure returned via @pfull is transient
30
+ * and must be consumed or copied immediately, before any further
31
+ * access or changes to TLB @mmu_idx.
32
+ */
33
+int probe_access_full(CPUArchState *env, target_ulong addr,
34
+ MMUAccessType access_type, int mmu_idx,
35
+ bool nonfault, void **phost,
36
+ CPUTLBEntryFull **pfull, uintptr_t retaddr);
37
+#endif
21
+#endif
38
+
22
+
39
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
23
if (def->nb_iargs + def->nb_oargs == 0) {
40
24
return NULL;
41
/* Estimated block size for TB allocation. */
42
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
43
index XXXXXXX..XXXXXXX 100644
44
--- a/include/qemu/typedefs.h
45
+++ b/include/qemu/typedefs.h
46
@@ -XXX,XX +XXX,XX @@ typedef struct ConfidentialGuestSupport ConfidentialGuestSupport;
47
typedef struct CPUAddressSpace CPUAddressSpace;
48
typedef struct CPUArchState CPUArchState;
49
typedef struct CPUState CPUState;
50
+typedef struct CPUTLBEntryFull CPUTLBEntryFull;
51
typedef struct DeviceListener DeviceListener;
52
typedef struct DeviceState DeviceState;
53
typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot;
54
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
55
index XXXXXXX..XXXXXXX 100644
56
--- a/accel/tcg/cputlb.c
57
+++ b/accel/tcg/cputlb.c
58
@@ -XXX,XX +XXX,XX @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
59
static int probe_access_internal(CPUArchState *env, target_ulong addr,
60
int fault_size, MMUAccessType access_type,
61
int mmu_idx, bool nonfault,
62
- void **phost, uintptr_t retaddr)
63
+ void **phost, CPUTLBEntryFull **pfull,
64
+ uintptr_t retaddr)
65
{
66
uintptr_t index = tlb_index(env, mmu_idx, addr);
67
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
68
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
69
mmu_idx, nonfault, retaddr)) {
70
/* Non-faulting page table read failed. */
71
*phost = NULL;
72
+ *pfull = NULL;
73
return TLB_INVALID_MASK;
74
}
75
76
/* TLB resize via tlb_fill may have moved the entry. */
77
+ index = tlb_index(env, mmu_idx, addr);
78
entry = tlb_entry(env, mmu_idx, addr);
79
80
/*
81
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
82
}
83
flags &= tlb_addr;
84
85
+ *pfull = &env_tlb(env)->d[mmu_idx].fulltlb[index];
86
+
87
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
88
if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
89
*phost = NULL;
90
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
91
return flags;
92
}
93
94
-int probe_access_flags(CPUArchState *env, target_ulong addr,
95
- MMUAccessType access_type, int mmu_idx,
96
- bool nonfault, void **phost, uintptr_t retaddr)
97
+int probe_access_full(CPUArchState *env, target_ulong addr,
98
+ MMUAccessType access_type, int mmu_idx,
99
+ bool nonfault, void **phost, CPUTLBEntryFull **pfull,
100
+ uintptr_t retaddr)
101
{
102
- int flags;
103
-
104
- flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
105
- nonfault, phost, retaddr);
106
+ int flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
107
+ nonfault, phost, pfull, retaddr);
108
109
/* Handle clean RAM pages. */
110
if (unlikely(flags & TLB_NOTDIRTY)) {
111
- uintptr_t index = tlb_index(env, mmu_idx, addr);
112
- CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
113
-
114
- notdirty_write(env_cpu(env), addr, 1, full, retaddr);
115
+ notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
116
flags &= ~TLB_NOTDIRTY;
117
}
118
119
return flags;
120
}
121
122
+int probe_access_flags(CPUArchState *env, target_ulong addr,
123
+ MMUAccessType access_type, int mmu_idx,
124
+ bool nonfault, void **phost, uintptr_t retaddr)
125
+{
126
+ CPUTLBEntryFull *full;
127
+
128
+ return probe_access_full(env, addr, access_type, mmu_idx,
129
+ nonfault, phost, &full, retaddr);
130
+}
131
+
132
void *probe_access(CPUArchState *env, target_ulong addr, int size,
133
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
134
{
135
+ CPUTLBEntryFull *full;
136
void *host;
137
int flags;
138
139
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
140
141
flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
142
- false, &host, retaddr);
143
+ false, &host, &full, retaddr);
144
145
/* Per the interface, size == 0 merely faults the access. */
146
if (size == 0) {
147
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
148
}
149
150
if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
151
- uintptr_t index = tlb_index(env, mmu_idx, addr);
152
- CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
153
-
154
/* Handle watchpoints. */
155
if (flags & TLB_WATCHPOINT) {
156
int wp_access = (access_type == MMU_DATA_STORE
157
@@ -XXX,XX +XXX,XX @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
158
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
159
MMUAccessType access_type, int mmu_idx)
160
{
161
+ CPUTLBEntryFull *full;
162
void *host;
163
int flags;
164
165
flags = probe_access_internal(env, addr, 0, access_type,
166
- mmu_idx, true, &host, 0);
167
+ mmu_idx, true, &host, &full, 0);
168
169
/* No combination of flags are expected by the caller. */
170
return flags ? NULL : host;
171
@@ -XXX,XX +XXX,XX @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
172
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
173
void **hostp)
174
{
175
+ CPUTLBEntryFull *full;
176
void *p;
177
178
(void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
179
- cpu_mmu_index(env, true), false, &p, 0);
180
+ cpu_mmu_index(env, true), false, &p, &full, 0);
181
if (p == NULL) {
182
return -1;
183
}
25
}
184
--
26
--
185
2.34.1
27
2.43.0
186
28
187
29
diff view generated by jsdifflib
New patch
1
The br, mb, goto_tb and exit_tb opcodes do not have
2
register operands, only constants, flags, or labels.
3
Remove the special case in opcode_args_ct by including
4
TCG_OPF_NOT_PRESENT in the flags for these opcodes.
1
5
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
include/tcg/tcg-opc.h | 8 ++++----
10
tcg/tcg.c | 3 ---
11
2 files changed, 4 insertions(+), 7 deletions(-)
12
13
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/include/tcg/tcg-opc.h
16
+++ b/include/tcg/tcg-opc.h
17
@@ -XXX,XX +XXX,XX @@ DEF(set_label, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)
18
/* variable number of parameters */
19
DEF(call, 0, 0, 3, TCG_OPF_CALL_CLOBBER | TCG_OPF_NOT_PRESENT)
20
21
-DEF(br, 0, 0, 1, TCG_OPF_BB_END)
22
+DEF(br, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)
23
24
#define IMPL(X) (__builtin_constant_p(X) && (X) <= 0 ? TCG_OPF_NOT_PRESENT : 0)
25
#if TCG_TARGET_REG_BITS == 32
26
@@ -XXX,XX +XXX,XX @@ DEF(br, 0, 0, 1, TCG_OPF_BB_END)
27
# define IMPL64 TCG_OPF_64BIT
28
#endif
29
30
-DEF(mb, 0, 0, 1, 0)
31
+DEF(mb, 0, 0, 1, TCG_OPF_NOT_PRESENT)
32
33
DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT)
34
DEF(setcond_i32, 1, 2, 1, 0)
35
@@ -XXX,XX +XXX,XX @@ DEF(mulsh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulsh_i64))
36
/* There are tcg_ctx->insn_start_words here, not just one. */
37
DEF(insn_start, 0, 0, DATA64_ARGS, TCG_OPF_NOT_PRESENT)
38
39
-DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
40
-DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
41
+DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)
42
+DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)
43
DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
44
45
DEF(plugin_cb, 0, 0, 1, TCG_OPF_NOT_PRESENT)
46
diff --git a/tcg/tcg.c b/tcg/tcg.c
47
index XXXXXXX..XXXXXXX 100644
48
--- a/tcg/tcg.c
49
+++ b/tcg/tcg.c
50
@@ -XXX,XX +XXX,XX @@ static const TCGArgConstraint *opcode_args_ct(const TCGOp *op)
51
assert(tcg_op_supported(op->opc, TCGOP_TYPE(op), TCGOP_FLAGS(op)));
52
#endif
53
54
- if (def->nb_iargs + def->nb_oargs == 0) {
55
- return NULL;
56
- }
57
if (def->flags & TCG_OPF_NOT_PRESENT) {
58
return empty_cts;
59
}
60
--
61
2.43.0
62
63
diff view generated by jsdifflib
1
Wrap the bare TranslationBlock pointer into a structure.
1
Allow the backend to make constraint choices based on more parameters.
2
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
5
---
7
accel/tcg/tb-hash.h | 1 +
6
tcg/tcg.c | 4 ++--
8
accel/tcg/tb-jmp-cache.h | 24 ++++++++++++++++++++++++
7
tcg/aarch64/tcg-target.c.inc | 3 ++-
9
include/exec/cpu-common.h | 1 +
8
tcg/arm/tcg-target.c.inc | 3 ++-
10
include/hw/core/cpu.h | 15 +--------------
9
tcg/i386/tcg-target.c.inc | 3 ++-
11
include/qemu/typedefs.h | 1 +
10
tcg/loongarch64/tcg-target.c.inc | 3 ++-
12
accel/stubs/tcg-stub.c | 4 ++++
11
tcg/mips/tcg-target.c.inc | 3 ++-
13
accel/tcg/cpu-exec.c | 10 +++++++---
12
tcg/ppc/tcg-target.c.inc | 3 ++-
14
accel/tcg/cputlb.c | 9 +++++----
13
tcg/riscv/tcg-target.c.inc | 3 ++-
15
accel/tcg/translate-all.c | 28 +++++++++++++++++++++++++---
14
tcg/s390x/tcg-target.c.inc | 3 ++-
16
hw/core/cpu-common.c | 3 +--
15
tcg/sparc64/tcg-target.c.inc | 3 ++-
17
plugins/core.c | 2 +-
16
tcg/tci/tcg-target.c.inc | 3 ++-
18
trace/control-target.c | 2 +-
17
11 files changed, 22 insertions(+), 12 deletions(-)
19
12 files changed, 72 insertions(+), 28 deletions(-)
20
create mode 100644 accel/tcg/tb-jmp-cache.h
21
18
22
diff --git a/accel/tcg/tb-hash.h b/accel/tcg/tb-hash.h
19
diff --git a/tcg/tcg.c b/tcg/tcg.c
23
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
24
--- a/accel/tcg/tb-hash.h
21
--- a/tcg/tcg.c
25
+++ b/accel/tcg/tb-hash.h
22
+++ b/tcg/tcg.c
26
@@ -XXX,XX +XXX,XX @@
23
@@ -XXX,XX +XXX,XX @@ typedef enum {
27
#include "exec/cpu-defs.h"
24
#include "tcg-target-con-set.h"
28
#include "exec/exec-all.h"
25
} TCGConstraintSetIndex;
29
#include "qemu/xxhash.h"
26
30
+#include "tb-jmp-cache.h"
27
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode);
31
28
+static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode, TCGType, unsigned);
32
#ifdef CONFIG_SOFTMMU
29
33
30
#undef C_O0_I1
34
diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h
31
#undef C_O0_I2
35
new file mode 100644
32
@@ -XXX,XX +XXX,XX @@ static const TCGArgConstraint *opcode_args_ct(const TCGOp *op)
36
index XXXXXXX..XXXXXXX
33
return empty_cts;
37
--- /dev/null
34
}
38
+++ b/accel/tcg/tb-jmp-cache.h
35
39
@@ -XXX,XX +XXX,XX @@
36
- con_set = tcg_target_op_def(op->opc);
40
+/*
37
+ con_set = tcg_target_op_def(op->opc, TCGOP_TYPE(op), TCGOP_FLAGS(op));
41
+ * The per-CPU TranslationBlock jump cache.
38
tcg_debug_assert(con_set >= 0 && con_set < ARRAY_SIZE(constraint_sets));
42
+ *
39
43
+ * Copyright (c) 2003 Fabrice Bellard
40
/* The constraint arguments must match TCGOpcode arguments. */
44
+ *
41
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
45
+ * SPDX-License-Identifier: GPL-2.0-or-later
46
+ */
47
+
48
+#ifndef ACCEL_TCG_TB_JMP_CACHE_H
49
+#define ACCEL_TCG_TB_JMP_CACHE_H
50
+
51
+#define TB_JMP_CACHE_BITS 12
52
+#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
53
+
54
+/*
55
+ * Accessed in parallel; all accesses to 'tb' must be atomic.
56
+ */
57
+struct CPUJumpCache {
58
+ struct {
59
+ TranslationBlock *tb;
60
+ } array[TB_JMP_CACHE_SIZE];
61
+};
62
+
63
+#endif /* ACCEL_TCG_TB_JMP_CACHE_H */
64
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
65
index XXXXXXX..XXXXXXX 100644
42
index XXXXXXX..XXXXXXX 100644
66
--- a/include/exec/cpu-common.h
43
--- a/tcg/aarch64/tcg-target.c.inc
67
+++ b/include/exec/cpu-common.h
44
+++ b/tcg/aarch64/tcg-target.c.inc
68
@@ -XXX,XX +XXX,XX @@ void cpu_list_unlock(void);
45
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
69
unsigned int cpu_list_generation_id_get(void);
70
71
void tcg_flush_softmmu_tlb(CPUState *cs);
72
+void tcg_flush_jmp_cache(CPUState *cs);
73
74
void tcg_iommu_init_notifier_list(CPUState *cpu);
75
void tcg_iommu_free_notifier_list(CPUState *cpu);
76
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
77
index XXXXXXX..XXXXXXX 100644
78
--- a/include/hw/core/cpu.h
79
+++ b/include/hw/core/cpu.h
80
@@ -XXX,XX +XXX,XX @@ struct kvm_run;
81
struct hax_vcpu_state;
82
struct hvf_vcpu_state;
83
84
-#define TB_JMP_CACHE_BITS 12
85
-#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
86
-
87
/* work queue */
88
89
/* The union type allows passing of 64 bit target pointers on 32 bit
90
@@ -XXX,XX +XXX,XX @@ struct CPUState {
91
CPUArchState *env_ptr;
92
IcountDecr *icount_decr_ptr;
93
94
- /* Accessed in parallel; all accesses must be atomic */
95
- TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
96
+ CPUJumpCache *tb_jmp_cache;
97
98
struct GDBRegisterState *gdb_regs;
99
int gdb_num_regs;
100
@@ -XXX,XX +XXX,XX @@ extern CPUTailQ cpus;
101
102
extern __thread CPUState *current_cpu;
103
104
-static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
105
-{
106
- unsigned int i;
107
-
108
- for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
109
- qatomic_set(&cpu->tb_jmp_cache[i], NULL);
110
- }
111
-}
112
-
113
/**
114
* qemu_tcg_mttcg_enabled:
115
* Check whether we are running MultiThread TCG or not.
116
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
117
index XXXXXXX..XXXXXXX 100644
118
--- a/include/qemu/typedefs.h
119
+++ b/include/qemu/typedefs.h
120
@@ -XXX,XX +XXX,XX @@ typedef struct CoMutex CoMutex;
121
typedef struct ConfidentialGuestSupport ConfidentialGuestSupport;
122
typedef struct CPUAddressSpace CPUAddressSpace;
123
typedef struct CPUArchState CPUArchState;
124
+typedef struct CPUJumpCache CPUJumpCache;
125
typedef struct CPUState CPUState;
126
typedef struct CPUTLBEntryFull CPUTLBEntryFull;
127
typedef struct DeviceListener DeviceListener;
128
diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c
129
index XXXXXXX..XXXXXXX 100644
130
--- a/accel/stubs/tcg-stub.c
131
+++ b/accel/stubs/tcg-stub.c
132
@@ -XXX,XX +XXX,XX @@ void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
133
{
134
}
135
136
+void tcg_flush_jmp_cache(CPUState *cpu)
137
+{
138
+}
139
+
140
int probe_access_flags(CPUArchState *env, target_ulong addr,
141
MMUAccessType access_type, int mmu_idx,
142
bool nonfault, void **phost, uintptr_t retaddr)
143
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
144
index XXXXXXX..XXXXXXX 100644
145
--- a/accel/tcg/cpu-exec.c
146
+++ b/accel/tcg/cpu-exec.c
147
@@ -XXX,XX +XXX,XX @@
148
#include "sysemu/replay.h"
149
#include "sysemu/tcg.h"
150
#include "exec/helper-proto.h"
151
+#include "tb-jmp-cache.h"
152
#include "tb-hash.h"
153
#include "tb-context.h"
154
#include "internal.h"
155
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
156
tcg_debug_assert(!(cflags & CF_INVALID));
157
158
hash = tb_jmp_cache_hash_func(pc);
159
- tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
160
+ tb = qatomic_rcu_read(&cpu->tb_jmp_cache->array[hash].tb);
161
162
if (likely(tb &&
163
tb->pc == pc &&
164
@@ -XXX,XX +XXX,XX @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
165
if (tb == NULL) {
166
return NULL;
167
}
168
- qatomic_set(&cpu->tb_jmp_cache[hash], tb);
169
+ qatomic_set(&cpu->tb_jmp_cache->array[hash].tb, tb);
170
return tb;
171
}
172
173
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
174
175
tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
176
if (tb == NULL) {
177
+ uint32_t h;
178
+
179
mmap_lock();
180
tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
181
mmap_unlock();
182
@@ -XXX,XX +XXX,XX @@ int cpu_exec(CPUState *cpu)
183
* We add the TB in the virtual pc hash table
184
* for the fast lookup
185
*/
186
- qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
187
+ h = tb_jmp_cache_hash_func(pc);
188
+ qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
189
}
190
191
#ifndef CONFIG_USER_ONLY
192
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
193
index XXXXXXX..XXXXXXX 100644
194
--- a/accel/tcg/cputlb.c
195
+++ b/accel/tcg/cputlb.c
196
@@ -XXX,XX +XXX,XX @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
197
198
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
199
{
200
- unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
201
+ int i, i0 = tb_jmp_cache_hash_page(page_addr);
202
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
203
204
for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
205
- qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
206
+ qatomic_set(&jc->array[i0 + i].tb, NULL);
207
}
46
}
208
}
47
}
209
48
210
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
49
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
211
50
+static TCGConstraintSetIndex
212
qemu_spin_unlock(&env_tlb(env)->c.lock);
51
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
213
52
{
214
- cpu_tb_jmp_cache_clear(cpu);
53
switch (op) {
215
+ tcg_flush_jmp_cache(cpu);
54
case INDEX_op_goto_ptr:
216
55
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
217
if (to_clean == ALL_MMUIDX_BITS) {
218
qatomic_set(&env_tlb(env)->c.full_flush_count,
219
@@ -XXX,XX +XXX,XX @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
220
* longer to clear each entry individually than it will to clear it all.
221
*/
222
if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
223
- cpu_tb_jmp_cache_clear(cpu);
224
+ tcg_flush_jmp_cache(cpu);
225
return;
226
}
227
228
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
229
index XXXXXXX..XXXXXXX 100644
56
index XXXXXXX..XXXXXXX 100644
230
--- a/accel/tcg/translate-all.c
57
--- a/tcg/arm/tcg-target.c.inc
231
+++ b/accel/tcg/translate-all.c
58
+++ b/tcg/arm/tcg-target.c.inc
232
@@ -XXX,XX +XXX,XX @@
59
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
233
#include "sysemu/tcg.h"
234
#include "qapi/error.h"
235
#include "hw/core/tcg-cpu-ops.h"
236
+#include "tb-jmp-cache.h"
237
#include "tb-hash.h"
238
#include "tb-context.h"
239
#include "internal.h"
240
@@ -XXX,XX +XXX,XX @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
241
}
242
243
CPU_FOREACH(cpu) {
244
- cpu_tb_jmp_cache_clear(cpu);
245
+ tcg_flush_jmp_cache(cpu);
246
}
247
248
qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
249
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
250
/* remove the TB from the hash list */
251
h = tb_jmp_cache_hash_func(tb->pc);
252
CPU_FOREACH(cpu) {
253
- if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
254
- qatomic_set(&cpu->tb_jmp_cache[h], NULL);
255
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
256
+ if (qatomic_read(&jc->array[h].tb) == tb) {
257
+ qatomic_set(&jc->array[h].tb, NULL);
258
}
259
}
260
261
@@ -XXX,XX +XXX,XX @@ int page_unprotect(target_ulong address, uintptr_t pc)
262
}
263
#endif /* CONFIG_USER_ONLY */
264
265
+/*
266
+ * Called by generic code at e.g. cpu reset after cpu creation,
267
+ * therefore we must be prepared to allocate the jump cache.
268
+ */
269
+void tcg_flush_jmp_cache(CPUState *cpu)
270
+{
271
+ CPUJumpCache *jc = cpu->tb_jmp_cache;
272
+
273
+ if (likely(jc)) {
274
+ for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
275
+ qatomic_set(&jc->array[i].tb, NULL);
276
+ }
277
+ } else {
278
+ /* This should happen once during realize, and thus never race. */
279
+ jc = g_new0(CPUJumpCache, 1);
280
+ jc = qatomic_xchg(&cpu->tb_jmp_cache, jc);
281
+ assert(jc == NULL);
282
+ }
283
+}
284
+
285
/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
286
void tcg_flush_softmmu_tlb(CPUState *cs)
287
{
288
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
289
index XXXXXXX..XXXXXXX 100644
290
--- a/hw/core/cpu-common.c
291
+++ b/hw/core/cpu-common.c
292
@@ -XXX,XX +XXX,XX @@ static void cpu_common_reset(DeviceState *dev)
293
cpu->cflags_next_tb = -1;
294
295
if (tcg_enabled()) {
296
- cpu_tb_jmp_cache_clear(cpu);
297
-
298
+ tcg_flush_jmp_cache(cpu);
299
tcg_flush_softmmu_tlb(cpu);
300
}
60
}
301
}
61
}
302
diff --git a/plugins/core.c b/plugins/core.c
62
63
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
64
+static TCGConstraintSetIndex
65
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
66
{
67
switch (op) {
68
case INDEX_op_goto_ptr:
69
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
303
index XXXXXXX..XXXXXXX 100644
70
index XXXXXXX..XXXXXXX 100644
304
--- a/plugins/core.c
71
--- a/tcg/i386/tcg-target.c.inc
305
+++ b/plugins/core.c
72
+++ b/tcg/i386/tcg-target.c.inc
306
@@ -XXX,XX +XXX,XX @@ struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
73
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
307
static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
74
}
75
}
76
77
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
78
+static TCGConstraintSetIndex
79
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
308
{
80
{
309
bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX);
81
switch (op) {
310
- cpu_tb_jmp_cache_clear(cpu);
82
case INDEX_op_goto_ptr:
311
+ tcg_flush_jmp_cache(cpu);
83
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
84
index XXXXXXX..XXXXXXX 100644
85
--- a/tcg/loongarch64/tcg-target.c.inc
86
+++ b/tcg/loongarch64/tcg-target.c.inc
87
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
88
g_assert_not_reached();
312
}
89
}
313
90
314
static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
91
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
315
diff --git a/trace/control-target.c b/trace/control-target.c
92
+static TCGConstraintSetIndex
93
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
94
{
95
switch (op) {
96
case INDEX_op_goto_ptr:
97
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
316
index XXXXXXX..XXXXXXX 100644
98
index XXXXXXX..XXXXXXX 100644
317
--- a/trace/control-target.c
99
--- a/tcg/mips/tcg-target.c.inc
318
+++ b/trace/control-target.c
100
+++ b/tcg/mips/tcg-target.c.inc
319
@@ -XXX,XX +XXX,XX @@ static void trace_event_synchronize_vcpu_state_dynamic(
101
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
102
}
103
}
104
105
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
106
+static TCGConstraintSetIndex
107
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
320
{
108
{
321
bitmap_copy(vcpu->trace_dstate, vcpu->trace_dstate_delayed,
109
switch (op) {
322
CPU_TRACE_DSTATE_MAX_EVENTS);
110
case INDEX_op_goto_ptr:
323
- cpu_tb_jmp_cache_clear(vcpu);
111
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
324
+ tcg_flush_jmp_cache(vcpu);
112
index XXXXXXX..XXXXXXX 100644
113
--- a/tcg/ppc/tcg-target.c.inc
114
+++ b/tcg/ppc/tcg-target.c.inc
115
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
116
va_end(va);
325
}
117
}
326
118
327
void trace_event_set_vcpu_state_dynamic(CPUState *vcpu,
119
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
120
+static TCGConstraintSetIndex
121
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
122
{
123
switch (op) {
124
case INDEX_op_goto_ptr:
125
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
126
index XXXXXXX..XXXXXXX 100644
127
--- a/tcg/riscv/tcg-target.c.inc
128
+++ b/tcg/riscv/tcg-target.c.inc
129
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
130
}
131
}
132
133
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
134
+static TCGConstraintSetIndex
135
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
136
{
137
switch (op) {
138
case INDEX_op_goto_ptr:
139
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
140
index XXXXXXX..XXXXXXX 100644
141
--- a/tcg/s390x/tcg-target.c.inc
142
+++ b/tcg/s390x/tcg-target.c.inc
143
@@ -XXX,XX +XXX,XX @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
144
va_end(va);
145
}
146
147
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
148
+static TCGConstraintSetIndex
149
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
150
{
151
switch (op) {
152
case INDEX_op_goto_ptr:
153
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
154
index XXXXXXX..XXXXXXX 100644
155
--- a/tcg/sparc64/tcg-target.c.inc
156
+++ b/tcg/sparc64/tcg-target.c.inc
157
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
158
}
159
}
160
161
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
162
+static TCGConstraintSetIndex
163
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
164
{
165
switch (op) {
166
case INDEX_op_goto_ptr:
167
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
168
index XXXXXXX..XXXXXXX 100644
169
--- a/tcg/tci/tcg-target.c.inc
170
+++ b/tcg/tci/tcg-target.c.inc
171
@@ -XXX,XX +XXX,XX @@
172
#endif
173
#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
174
175
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
176
+static TCGConstraintSetIndex
177
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
178
{
179
switch (op) {
180
case INDEX_op_goto_ptr:
328
--
181
--
329
2.34.1
182
2.43.0
330
183
331
184
diff view generated by jsdifflib
New patch
1
1
Pass TCGOp.type to the output function.
2
For aarch64 and tci, use this instead of testing TCG_OPF_64BIT.
3
For s390x, use this instead of testing INDEX_op_deposit_i64.
4
For i386, use this to initialize rexw.
5
6
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
9
tcg/tcg.c | 4 ++--
10
tcg/aarch64/tcg-target.c.inc | 6 +-----
11
tcg/arm/tcg-target.c.inc | 2 +-
12
tcg/i386/tcg-target.c.inc | 10 +++++-----
13
tcg/loongarch64/tcg-target.c.inc | 2 +-
14
tcg/mips/tcg-target.c.inc | 2 +-
15
tcg/ppc/tcg-target.c.inc | 2 +-
16
tcg/riscv/tcg-target.c.inc | 2 +-
17
tcg/s390x/tcg-target.c.inc | 7 +++----
18
tcg/sparc64/tcg-target.c.inc | 2 +-
19
tcg/tci/tcg-target.c.inc | 4 ++--
20
11 files changed, 19 insertions(+), 24 deletions(-)
21
22
diff --git a/tcg/tcg.c b/tcg/tcg.c
23
index XXXXXXX..XXXXXXX 100644
24
--- a/tcg/tcg.c
25
+++ b/tcg/tcg.c
26
@@ -XXX,XX +XXX,XX @@ static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
27
static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2);
28
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
29
static void tcg_out_goto_tb(TCGContext *s, int which);
30
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
31
+static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
32
const TCGArg args[TCG_MAX_OP_ARGS],
33
const int const_args[TCG_MAX_OP_ARGS]);
34
#if TCG_TARGET_MAYBE_vec
35
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
36
tcg_out_vec_op(s, op->opc, TCGOP_TYPE(op) - TCG_TYPE_V64,
37
TCGOP_VECE(op), new_args, const_args);
38
} else {
39
- tcg_out_op(s, op->opc, new_args, const_args);
40
+ tcg_out_op(s, op->opc, TCGOP_TYPE(op), new_args, const_args);
41
}
42
break;
43
}
44
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
45
index XXXXXXX..XXXXXXX 100644
46
--- a/tcg/aarch64/tcg-target.c.inc
47
+++ b/tcg/aarch64/tcg-target.c.inc
48
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
49
flush_idcache_range(jmp_rx, jmp_rw, 4);
50
}
51
52
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
53
+static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
54
const TCGArg args[TCG_MAX_OP_ARGS],
55
const int const_args[TCG_MAX_OP_ARGS])
56
{
57
- /* 99% of the time, we can signal the use of extension registers
58
- by looking to see if the opcode handles 64-bit data. */
59
- TCGType ext = (tcg_op_defs[opc].flags & TCG_OPF_64BIT) != 0;
60
-
61
/* Hoist the loads of the most common arguments. */
62
TCGArg a0 = args[0];
63
TCGArg a1 = args[1];
64
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
65
index XXXXXXX..XXXXXXX 100644
66
--- a/tcg/arm/tcg-target.c.inc
67
+++ b/tcg/arm/tcg-target.c.inc
68
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
69
flush_idcache_range(jmp_rx, jmp_rw, 4);
70
}
71
72
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
73
+static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
74
const TCGArg args[TCG_MAX_OP_ARGS],
75
const int const_args[TCG_MAX_OP_ARGS])
76
{
77
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
78
index XXXXXXX..XXXXXXX 100644
79
--- a/tcg/i386/tcg-target.c.inc
80
+++ b/tcg/i386/tcg-target.c.inc
81
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
82
/* no need to flush icache explicitly */
83
}
84
85
-static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
86
- const TCGArg args[TCG_MAX_OP_ARGS],
87
- const int const_args[TCG_MAX_OP_ARGS])
88
+static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
89
+ const TCGArg args[TCG_MAX_OP_ARGS],
90
+ const int const_args[TCG_MAX_OP_ARGS])
91
{
92
TCGArg a0, a1, a2;
93
- int c, const_a2, vexop, rexw = 0;
94
+ int c, const_a2, vexop, rexw;
95
96
#if TCG_TARGET_REG_BITS == 64
97
# define OP_32_64(x) \
98
case glue(glue(INDEX_op_, x), _i64): \
99
- rexw = P_REXW; /* FALLTHRU */ \
100
case glue(glue(INDEX_op_, x), _i32)
101
#else
102
# define OP_32_64(x) \
103
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
104
a1 = args[1];
105
a2 = args[2];
106
const_a2 = const_args[2];
107
+ rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
108
109
switch (opc) {
110
case INDEX_op_goto_ptr:
111
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
112
index XXXXXXX..XXXXXXX 100644
113
--- a/tcg/loongarch64/tcg-target.c.inc
114
+++ b/tcg/loongarch64/tcg-target.c.inc
115
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
116
flush_idcache_range(jmp_rx, jmp_rw, 4);
117
}
118
119
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
120
+static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
121
const TCGArg args[TCG_MAX_OP_ARGS],
122
const int const_args[TCG_MAX_OP_ARGS])
123
{
124
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
125
index XXXXXXX..XXXXXXX 100644
126
--- a/tcg/mips/tcg-target.c.inc
127
+++ b/tcg/mips/tcg-target.c.inc
128
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
129
/* Always indirect, nothing to do */
130
}
131
132
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
133
+static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
134
const TCGArg args[TCG_MAX_OP_ARGS],
135
const int const_args[TCG_MAX_OP_ARGS])
136
{
137
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
138
index XXXXXXX..XXXXXXX 100644
139
--- a/tcg/ppc/tcg-target.c.inc
140
+++ b/tcg/ppc/tcg-target.c.inc
141
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
142
flush_idcache_range(jmp_rx, jmp_rw, 4);
143
}
144
145
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
146
+static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
147
const TCGArg args[TCG_MAX_OP_ARGS],
148
const int const_args[TCG_MAX_OP_ARGS])
149
{
150
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
151
index XXXXXXX..XXXXXXX 100644
152
--- a/tcg/riscv/tcg-target.c.inc
153
+++ b/tcg/riscv/tcg-target.c.inc
154
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
155
flush_idcache_range(jmp_rx, jmp_rw, 4);
156
}
157
158
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
159
+static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
160
const TCGArg args[TCG_MAX_OP_ARGS],
161
const int const_args[TCG_MAX_OP_ARGS])
162
{
163
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
164
index XXXXXXX..XXXXXXX 100644
165
--- a/tcg/s390x/tcg-target.c.inc
166
+++ b/tcg/s390x/tcg-target.c.inc
167
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
168
case glue(glue(INDEX_op_,x),_i32): \
169
case glue(glue(INDEX_op_,x),_i64)
170
171
-static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
172
- const TCGArg args[TCG_MAX_OP_ARGS],
173
- const int const_args[TCG_MAX_OP_ARGS])
174
+static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
175
+ const TCGArg args[TCG_MAX_OP_ARGS],
176
+ const int const_args[TCG_MAX_OP_ARGS])
177
{
178
S390Opcode op, op2;
179
TCGArg a0, a1, a2;
180
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
181
/* Since we can't support "0Z" as a constraint, we allow a1 in
182
any register. Fix things up as if a matching constraint. */
183
if (a0 != a1) {
184
- TCGType type = (opc == INDEX_op_deposit_i64);
185
if (a0 == a2) {
186
tcg_out_mov(s, type, TCG_TMP0, a2);
187
a2 = TCG_TMP0;
188
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
189
index XXXXXXX..XXXXXXX 100644
190
--- a/tcg/sparc64/tcg-target.c.inc
191
+++ b/tcg/sparc64/tcg-target.c.inc
192
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
193
{
194
}
195
196
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
197
+static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
198
const TCGArg args[TCG_MAX_OP_ARGS],
199
const int const_args[TCG_MAX_OP_ARGS])
200
{
201
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
202
index XXXXXXX..XXXXXXX 100644
203
--- a/tcg/tci/tcg-target.c.inc
204
+++ b/tcg/tci/tcg-target.c.inc
205
@@ -XXX,XX +XXX,XX @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
206
/* Always indirect, nothing to do */
207
}
208
209
-static void tcg_out_op(TCGContext *s, TCGOpcode opc,
210
+static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
211
const TCGArg args[TCG_MAX_OP_ARGS],
212
const int const_args[TCG_MAX_OP_ARGS])
213
{
214
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
215
CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
216
{
217
TCGArg pos = args[2], len = args[3];
218
- TCGArg max = tcg_op_defs[opc].flags & TCG_OPF_64BIT ? 64 : 32;
219
+ TCGArg max = type == TCG_TYPE_I32 ? 32 : 64;
220
221
tcg_debug_assert(pos < max);
222
tcg_debug_assert(pos + len <= max);
223
--
224
2.43.0
225
226
diff view generated by jsdifflib
New patch
1
This flag is no longer used.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/tcg/tcg-opc.h | 22 +++++++++++-----------
7
include/tcg/tcg.h | 2 --
8
2 files changed, 11 insertions(+), 13 deletions(-)
9
10
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/include/tcg/tcg-opc.h
13
+++ b/include/tcg/tcg-opc.h
14
@@ -XXX,XX +XXX,XX @@ DEF(br, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)
15
16
#define IMPL(X) (__builtin_constant_p(X) && (X) <= 0 ? TCG_OPF_NOT_PRESENT : 0)
17
#if TCG_TARGET_REG_BITS == 32
18
-# define IMPL64 TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT
19
+# define IMPL64 TCG_OPF_NOT_PRESENT
20
#else
21
-# define IMPL64 TCG_OPF_64BIT
22
+# define IMPL64 0
23
#endif
24
25
DEF(mb, 0, 0, 1, TCG_OPF_NOT_PRESENT)
26
@@ -XXX,XX +XXX,XX @@ DEF(clz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_clz_i32))
27
DEF(ctz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_ctz_i32))
28
DEF(ctpop_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ctpop_i32))
29
30
-DEF(mov_i64, 1, 1, 0, TCG_OPF_64BIT | TCG_OPF_NOT_PRESENT)
31
+DEF(mov_i64, 1, 1, 0, TCG_OPF_NOT_PRESENT)
32
DEF(setcond_i64, 1, 2, 1, IMPL64)
33
DEF(negsetcond_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_negsetcond_i64))
34
DEF(movcond_i64, 1, 4, 1, IMPL64)
35
@@ -XXX,XX +XXX,XX @@ DEF(qemu_ld_a32_i32, 1, 1, 1,
36
DEF(qemu_st_a32_i32, 0, 1 + 1, 1,
37
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
38
DEF(qemu_ld_a32_i64, DATA64_ARGS, 1, 1,
39
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
40
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
41
DEF(qemu_st_a32_i64, 0, DATA64_ARGS + 1, 1,
42
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
43
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
44
45
DEF(qemu_ld_a64_i32, 1, DATA64_ARGS, 1,
46
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
47
DEF(qemu_st_a64_i32, 0, 1 + DATA64_ARGS, 1,
48
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
49
DEF(qemu_ld_a64_i64, DATA64_ARGS, DATA64_ARGS, 1,
50
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
51
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
52
DEF(qemu_st_a64_i64, 0, DATA64_ARGS + DATA64_ARGS, 1,
53
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
54
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
55
56
/* Only used by i386 to cope with stupid register constraints. */
57
DEF(qemu_st8_a32_i32, 0, 1 + 1, 1,
58
@@ -XXX,XX +XXX,XX @@ DEF(qemu_st8_a64_i32, 0, 1 + DATA64_ARGS, 1,
59
60
/* Only for 64-bit hosts at the moment. */
61
DEF(qemu_ld_a32_i128, 2, 1, 1,
62
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
63
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
64
IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
65
DEF(qemu_ld_a64_i128, 2, 1, 1,
66
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
67
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
68
IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
69
DEF(qemu_st_a32_i128, 0, 3, 1,
70
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
71
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
72
IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
73
DEF(qemu_st_a64_i128, 0, 3, 1,
74
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
75
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
76
IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
77
78
/* Host vector support. */
79
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
80
index XXXXXXX..XXXXXXX 100644
81
--- a/include/tcg/tcg.h
82
+++ b/include/tcg/tcg.h
83
@@ -XXX,XX +XXX,XX @@ enum {
84
/* Instruction has side effects: it cannot be removed if its outputs
85
are not used, and might trigger exceptions. */
86
TCG_OPF_SIDE_EFFECTS = 0x08,
87
- /* Instruction operands are 64-bits (otherwise 32-bits). */
88
- TCG_OPF_64BIT = 0x10,
89
/* Instruction is optional and not implemented by the host, or insn
90
is generic and should not be implemented by the host. */
91
TCG_OPF_NOT_PRESENT = 0x20,
92
--
93
2.43.0
94
95
diff view generated by jsdifflib
New patch
1
Now that we use a functional interface to query whether the opcode
2
is supported, we can drop the TCG_OPF_NOT_PRESENT bit mapping from
3
TCG_TARGET_HAS_foo in tcg-opc.h
1
4
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
include/tcg/tcg-opc.h | 306 +++++++++++++++++++-----------------------
9
1 file changed, 141 insertions(+), 165 deletions(-)
10
11
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/include/tcg/tcg-opc.h
14
+++ b/include/tcg/tcg-opc.h
15
@@ -XXX,XX +XXX,XX @@ DEF(call, 0, 0, 3, TCG_OPF_CALL_CLOBBER | TCG_OPF_NOT_PRESENT)
16
17
DEF(br, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_NOT_PRESENT)
18
19
-#define IMPL(X) (__builtin_constant_p(X) && (X) <= 0 ? TCG_OPF_NOT_PRESENT : 0)
20
-#if TCG_TARGET_REG_BITS == 32
21
-# define IMPL64 TCG_OPF_NOT_PRESENT
22
-#else
23
-# define IMPL64 0
24
-#endif
25
-
26
DEF(mb, 0, 0, 1, TCG_OPF_NOT_PRESENT)
27
28
DEF(mov_i32, 1, 1, 0, TCG_OPF_NOT_PRESENT)
29
DEF(setcond_i32, 1, 2, 1, 0)
30
-DEF(negsetcond_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_negsetcond_i32))
31
+DEF(negsetcond_i32, 1, 2, 1, 0)
32
DEF(movcond_i32, 1, 4, 1, 0)
33
/* load/store */
34
DEF(ld8u_i32, 1, 1, 1, 0)
35
@@ -XXX,XX +XXX,XX @@ DEF(st_i32, 0, 2, 1, 0)
36
DEF(add_i32, 1, 2, 0, 0)
37
DEF(sub_i32, 1, 2, 0, 0)
38
DEF(mul_i32, 1, 2, 0, 0)
39
-DEF(div_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32))
40
-DEF(divu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_div_i32))
41
-DEF(rem_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32))
42
-DEF(remu_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rem_i32))
43
-DEF(div2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32))
44
-DEF(divu2_i32, 2, 3, 0, IMPL(TCG_TARGET_HAS_div2_i32))
45
+DEF(div_i32, 1, 2, 0, 0)
46
+DEF(divu_i32, 1, 2, 0, 0)
47
+DEF(rem_i32, 1, 2, 0, 0)
48
+DEF(remu_i32, 1, 2, 0, 0)
49
+DEF(div2_i32, 2, 3, 0, 0)
50
+DEF(divu2_i32, 2, 3, 0, 0)
51
DEF(and_i32, 1, 2, 0, 0)
52
DEF(or_i32, 1, 2, 0, 0)
53
DEF(xor_i32, 1, 2, 0, 0)
54
@@ -XXX,XX +XXX,XX @@ DEF(xor_i32, 1, 2, 0, 0)
55
DEF(shl_i32, 1, 2, 0, 0)
56
DEF(shr_i32, 1, 2, 0, 0)
57
DEF(sar_i32, 1, 2, 0, 0)
58
-DEF(rotl_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32))
59
-DEF(rotr_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_rot_i32))
60
-DEF(deposit_i32, 1, 2, 2, IMPL(TCG_TARGET_HAS_deposit_i32))
61
-DEF(extract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_extract_i32))
62
-DEF(sextract_i32, 1, 1, 2, IMPL(TCG_TARGET_HAS_sextract_i32))
63
-DEF(extract2_i32, 1, 2, 1, IMPL(TCG_TARGET_HAS_extract2_i32))
64
+DEF(rotl_i32, 1, 2, 0, 0)
65
+DEF(rotr_i32, 1, 2, 0, 0)
66
+DEF(deposit_i32, 1, 2, 2, 0)
67
+DEF(extract_i32, 1, 1, 2, 0)
68
+DEF(sextract_i32, 1, 1, 2, 0)
69
+DEF(extract2_i32, 1, 2, 1, 0)
70
71
DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
72
73
-DEF(add2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_add2_i32))
74
-DEF(sub2_i32, 2, 4, 0, IMPL(TCG_TARGET_HAS_sub2_i32))
75
-DEF(mulu2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_mulu2_i32))
76
-DEF(muls2_i32, 2, 2, 0, IMPL(TCG_TARGET_HAS_muls2_i32))
77
-DEF(muluh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_muluh_i32))
78
-DEF(mulsh_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_mulsh_i32))
79
-DEF(brcond2_i32, 0, 4, 2,
80
- TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL(TCG_TARGET_REG_BITS == 32))
81
-DEF(setcond2_i32, 1, 4, 1, IMPL(TCG_TARGET_REG_BITS == 32))
82
+DEF(add2_i32, 2, 4, 0, 0)
83
+DEF(sub2_i32, 2, 4, 0, 0)
84
+DEF(mulu2_i32, 2, 2, 0, 0)
85
+DEF(muls2_i32, 2, 2, 0, 0)
86
+DEF(muluh_i32, 1, 2, 0, 0)
87
+DEF(mulsh_i32, 1, 2, 0, 0)
88
+DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
89
+DEF(setcond2_i32, 1, 4, 1, 0)
90
91
-DEF(ext8s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8s_i32))
92
-DEF(ext16s_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16s_i32))
93
-DEF(ext8u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext8u_i32))
94
-DEF(ext16u_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ext16u_i32))
95
-DEF(bswap16_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap16_i32))
96
-DEF(bswap32_i32, 1, 1, 1, IMPL(TCG_TARGET_HAS_bswap32_i32))
97
-DEF(not_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_not_i32))
98
+DEF(ext8s_i32, 1, 1, 0, 0)
99
+DEF(ext16s_i32, 1, 1, 0, 0)
100
+DEF(ext8u_i32, 1, 1, 0, 0)
101
+DEF(ext16u_i32, 1, 1, 0, 0)
102
+DEF(bswap16_i32, 1, 1, 1, 0)
103
+DEF(bswap32_i32, 1, 1, 1, 0)
104
+DEF(not_i32, 1, 1, 0, 0)
105
DEF(neg_i32, 1, 1, 0, 0)
106
-DEF(andc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_andc_i32))
107
-DEF(orc_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_orc_i32))
108
-DEF(eqv_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_eqv_i32))
109
-DEF(nand_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nand_i32))
110
-DEF(nor_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_nor_i32))
111
-DEF(clz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_clz_i32))
112
-DEF(ctz_i32, 1, 2, 0, IMPL(TCG_TARGET_HAS_ctz_i32))
113
-DEF(ctpop_i32, 1, 1, 0, IMPL(TCG_TARGET_HAS_ctpop_i32))
114
+DEF(andc_i32, 1, 2, 0, 0)
115
+DEF(orc_i32, 1, 2, 0, 0)
116
+DEF(eqv_i32, 1, 2, 0, 0)
117
+DEF(nand_i32, 1, 2, 0, 0)
118
+DEF(nor_i32, 1, 2, 0, 0)
119
+DEF(clz_i32, 1, 2, 0, 0)
120
+DEF(ctz_i32, 1, 2, 0, 0)
121
+DEF(ctpop_i32, 1, 1, 0, 0)
122
123
DEF(mov_i64, 1, 1, 0, TCG_OPF_NOT_PRESENT)
124
-DEF(setcond_i64, 1, 2, 1, IMPL64)
125
-DEF(negsetcond_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_negsetcond_i64))
126
-DEF(movcond_i64, 1, 4, 1, IMPL64)
127
+DEF(setcond_i64, 1, 2, 1, 0)
128
+DEF(negsetcond_i64, 1, 2, 1, 0)
129
+DEF(movcond_i64, 1, 4, 1, 0)
130
/* load/store */
131
-DEF(ld8u_i64, 1, 1, 1, IMPL64)
132
-DEF(ld8s_i64, 1, 1, 1, IMPL64)
133
-DEF(ld16u_i64, 1, 1, 1, IMPL64)
134
-DEF(ld16s_i64, 1, 1, 1, IMPL64)
135
-DEF(ld32u_i64, 1, 1, 1, IMPL64)
136
-DEF(ld32s_i64, 1, 1, 1, IMPL64)
137
-DEF(ld_i64, 1, 1, 1, IMPL64)
138
-DEF(st8_i64, 0, 2, 1, IMPL64)
139
-DEF(st16_i64, 0, 2, 1, IMPL64)
140
-DEF(st32_i64, 0, 2, 1, IMPL64)
141
-DEF(st_i64, 0, 2, 1, IMPL64)
142
+DEF(ld8u_i64, 1, 1, 1, 0)
143
+DEF(ld8s_i64, 1, 1, 1, 0)
144
+DEF(ld16u_i64, 1, 1, 1, 0)
145
+DEF(ld16s_i64, 1, 1, 1, 0)
146
+DEF(ld32u_i64, 1, 1, 1, 0)
147
+DEF(ld32s_i64, 1, 1, 1, 0)
148
+DEF(ld_i64, 1, 1, 1, 0)
149
+DEF(st8_i64, 0, 2, 1, 0)
150
+DEF(st16_i64, 0, 2, 1, 0)
151
+DEF(st32_i64, 0, 2, 1, 0)
152
+DEF(st_i64, 0, 2, 1, 0)
153
/* arith */
154
-DEF(add_i64, 1, 2, 0, IMPL64)
155
-DEF(sub_i64, 1, 2, 0, IMPL64)
156
-DEF(mul_i64, 1, 2, 0, IMPL64)
157
-DEF(div_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64))
158
-DEF(divu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div_i64))
159
-DEF(rem_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64))
160
-DEF(remu_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rem_i64))
161
-DEF(div2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64))
162
-DEF(divu2_i64, 2, 3, 0, IMPL64 | IMPL(TCG_TARGET_HAS_div2_i64))
163
-DEF(and_i64, 1, 2, 0, IMPL64)
164
-DEF(or_i64, 1, 2, 0, IMPL64)
165
-DEF(xor_i64, 1, 2, 0, IMPL64)
166
+DEF(add_i64, 1, 2, 0, 0)
167
+DEF(sub_i64, 1, 2, 0, 0)
168
+DEF(mul_i64, 1, 2, 0, 0)
169
+DEF(div_i64, 1, 2, 0, 0)
170
+DEF(divu_i64, 1, 2, 0, 0)
171
+DEF(rem_i64, 1, 2, 0, 0)
172
+DEF(remu_i64, 1, 2, 0, 0)
173
+DEF(div2_i64, 2, 3, 0, 0)
174
+DEF(divu2_i64, 2, 3, 0, 0)
175
+DEF(and_i64, 1, 2, 0, 0)
176
+DEF(or_i64, 1, 2, 0, 0)
177
+DEF(xor_i64, 1, 2, 0, 0)
178
/* shifts/rotates */
179
-DEF(shl_i64, 1, 2, 0, IMPL64)
180
-DEF(shr_i64, 1, 2, 0, IMPL64)
181
-DEF(sar_i64, 1, 2, 0, IMPL64)
182
-DEF(rotl_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
183
-DEF(rotr_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_rot_i64))
184
-DEF(deposit_i64, 1, 2, 2, IMPL64 | IMPL(TCG_TARGET_HAS_deposit_i64))
185
-DEF(extract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_extract_i64))
186
-DEF(sextract_i64, 1, 1, 2, IMPL64 | IMPL(TCG_TARGET_HAS_sextract_i64))
187
-DEF(extract2_i64, 1, 2, 1, IMPL64 | IMPL(TCG_TARGET_HAS_extract2_i64))
188
+DEF(shl_i64, 1, 2, 0, 0)
189
+DEF(shr_i64, 1, 2, 0, 0)
190
+DEF(sar_i64, 1, 2, 0, 0)
191
+DEF(rotl_i64, 1, 2, 0, 0)
192
+DEF(rotr_i64, 1, 2, 0, 0)
193
+DEF(deposit_i64, 1, 2, 2, 0)
194
+DEF(extract_i64, 1, 1, 2, 0)
195
+DEF(sextract_i64, 1, 1, 2, 0)
196
+DEF(extract2_i64, 1, 2, 1, 0)
197
198
/* size changing ops */
199
-DEF(ext_i32_i64, 1, 1, 0, IMPL64)
200
-DEF(extu_i32_i64, 1, 1, 0, IMPL64)
201
-DEF(extrl_i64_i32, 1, 1, 0,
202
- IMPL(TCG_TARGET_HAS_extr_i64_i32)
203
- | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
204
-DEF(extrh_i64_i32, 1, 1, 0,
205
- IMPL(TCG_TARGET_HAS_extr_i64_i32)
206
- | (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
207
+DEF(ext_i32_i64, 1, 1, 0, 0)
208
+DEF(extu_i32_i64, 1, 1, 0, 0)
209
+DEF(extrl_i64_i32, 1, 1, 0, 0)
210
+DEF(extrh_i64_i32, 1, 1, 0, 0)
211
212
-DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL64)
213
-DEF(ext8s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8s_i64))
214
-DEF(ext16s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16s_i64))
215
-DEF(ext32s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32s_i64))
216
-DEF(ext8u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8u_i64))
217
-DEF(ext16u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16u_i64))
218
-DEF(ext32u_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext32u_i64))
219
-DEF(bswap16_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap16_i64))
220
-DEF(bswap32_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap32_i64))
221
-DEF(bswap64_i64, 1, 1, 1, IMPL64 | IMPL(TCG_TARGET_HAS_bswap64_i64))
222
-DEF(not_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_not_i64))
223
-DEF(neg_i64, 1, 1, 0, IMPL64)
224
-DEF(andc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_andc_i64))
225
-DEF(orc_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_orc_i64))
226
-DEF(eqv_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_eqv_i64))
227
-DEF(nand_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nand_i64))
228
-DEF(nor_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_nor_i64))
229
-DEF(clz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_clz_i64))
230
-DEF(ctz_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctz_i64))
231
-DEF(ctpop_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ctpop_i64))
232
+DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH)
233
+DEF(ext8s_i64, 1, 1, 0, 0)
234
+DEF(ext16s_i64, 1, 1, 0, 0)
235
+DEF(ext32s_i64, 1, 1, 0, 0)
236
+DEF(ext8u_i64, 1, 1, 0, 0)
237
+DEF(ext16u_i64, 1, 1, 0, 0)
238
+DEF(ext32u_i64, 1, 1, 0, 0)
239
+DEF(bswap16_i64, 1, 1, 1, 0)
240
+DEF(bswap32_i64, 1, 1, 1, 0)
241
+DEF(bswap64_i64, 1, 1, 1, 0)
242
+DEF(not_i64, 1, 1, 0, 0)
243
+DEF(neg_i64, 1, 1, 0, 0)
244
+DEF(andc_i64, 1, 2, 0, 0)
245
+DEF(orc_i64, 1, 2, 0, 0)
246
+DEF(eqv_i64, 1, 2, 0, 0)
247
+DEF(nand_i64, 1, 2, 0, 0)
248
+DEF(nor_i64, 1, 2, 0, 0)
249
+DEF(clz_i64, 1, 2, 0, 0)
250
+DEF(ctz_i64, 1, 2, 0, 0)
251
+DEF(ctpop_i64, 1, 1, 0, 0)
252
253
-DEF(add2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_add2_i64))
254
-DEF(sub2_i64, 2, 4, 0, IMPL64 | IMPL(TCG_TARGET_HAS_sub2_i64))
255
-DEF(mulu2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulu2_i64))
256
-DEF(muls2_i64, 2, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muls2_i64))
257
-DEF(muluh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_muluh_i64))
258
-DEF(mulsh_i64, 1, 2, 0, IMPL64 | IMPL(TCG_TARGET_HAS_mulsh_i64))
259
+DEF(add2_i64, 2, 4, 0, 0)
260
+DEF(sub2_i64, 2, 4, 0, 0)
261
+DEF(mulu2_i64, 2, 2, 0, 0)
262
+DEF(muls2_i64, 2, 2, 0, 0)
263
+DEF(muluh_i64, 1, 2, 0, 0)
264
+DEF(mulsh_i64, 1, 2, 0, 0)
265
266
#define DATA64_ARGS (TCG_TARGET_REG_BITS == 64 ? 1 : 2)
267
268
@@ -XXX,XX +XXX,XX @@ DEF(qemu_st_a64_i64, 0, DATA64_ARGS + DATA64_ARGS, 1,
269
270
/* Only used by i386 to cope with stupid register constraints. */
271
DEF(qemu_st8_a32_i32, 0, 1 + 1, 1,
272
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
273
- IMPL(TCG_TARGET_HAS_qemu_st8_i32))
274
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
275
DEF(qemu_st8_a64_i32, 0, 1 + DATA64_ARGS, 1,
276
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
277
- IMPL(TCG_TARGET_HAS_qemu_st8_i32))
278
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
279
280
/* Only for 64-bit hosts at the moment. */
281
-DEF(qemu_ld_a32_i128, 2, 1, 1,
282
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
283
- IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
284
-DEF(qemu_ld_a64_i128, 2, 1, 1,
285
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
286
- IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
287
-DEF(qemu_st_a32_i128, 0, 3, 1,
288
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
289
- IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
290
-DEF(qemu_st_a64_i128, 0, 3, 1,
291
- TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
292
- IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
293
+DEF(qemu_ld_a32_i128, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
294
+DEF(qemu_ld_a64_i128, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
295
+DEF(qemu_st_a32_i128, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
296
+DEF(qemu_st_a64_i128, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
297
298
/* Host vector support. */
299
300
-#define IMPLVEC TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec)
301
+#define IMPLVEC TCG_OPF_VECTOR
302
303
DEF(mov_vec, 1, 1, 0, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT)
304
305
DEF(dup_vec, 1, 1, 0, IMPLVEC)
306
-DEF(dup2_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_REG_BITS == 32))
307
+DEF(dup2_vec, 1, 2, 0, IMPLVEC)
308
309
DEF(ld_vec, 1, 1, 1, IMPLVEC)
310
DEF(st_vec, 0, 2, 1, IMPLVEC)
311
@@ -XXX,XX +XXX,XX @@ DEF(dupm_vec, 1, 1, 1, IMPLVEC)
312
313
DEF(add_vec, 1, 2, 0, IMPLVEC)
314
DEF(sub_vec, 1, 2, 0, IMPLVEC)
315
-DEF(mul_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_mul_vec))
316
-DEF(neg_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_neg_vec))
317
-DEF(abs_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_abs_vec))
318
-DEF(ssadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
319
-DEF(usadd_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
320
-DEF(sssub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
321
-DEF(ussub_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_sat_vec))
322
-DEF(smin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
323
-DEF(umin_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
324
-DEF(smax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
325
-DEF(umax_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_minmax_vec))
326
+DEF(mul_vec, 1, 2, 0, IMPLVEC)
327
+DEF(neg_vec, 1, 1, 0, IMPLVEC)
328
+DEF(abs_vec, 1, 1, 0, IMPLVEC)
329
+DEF(ssadd_vec, 1, 2, 0, IMPLVEC)
330
+DEF(usadd_vec, 1, 2, 0, IMPLVEC)
331
+DEF(sssub_vec, 1, 2, 0, IMPLVEC)
332
+DEF(ussub_vec, 1, 2, 0, IMPLVEC)
333
+DEF(smin_vec, 1, 2, 0, IMPLVEC)
334
+DEF(umin_vec, 1, 2, 0, IMPLVEC)
335
+DEF(smax_vec, 1, 2, 0, IMPLVEC)
336
+DEF(umax_vec, 1, 2, 0, IMPLVEC)
337
338
DEF(and_vec, 1, 2, 0, IMPLVEC)
339
DEF(or_vec, 1, 2, 0, IMPLVEC)
340
DEF(xor_vec, 1, 2, 0, IMPLVEC)
341
-DEF(andc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_andc_vec))
342
-DEF(orc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_orc_vec))
343
-DEF(nand_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nand_vec))
344
-DEF(nor_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nor_vec))
345
-DEF(eqv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_eqv_vec))
346
-DEF(not_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_not_vec))
347
+DEF(andc_vec, 1, 2, 0, IMPLVEC)
348
+DEF(orc_vec, 1, 2, 0, IMPLVEC)
349
+DEF(nand_vec, 1, 2, 0, IMPLVEC)
350
+DEF(nor_vec, 1, 2, 0, IMPLVEC)
351
+DEF(eqv_vec, 1, 2, 0, IMPLVEC)
352
+DEF(not_vec, 1, 1, 0, IMPLVEC)
353
354
-DEF(shli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
355
-DEF(shri_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
356
-DEF(sari_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
357
-DEF(rotli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_roti_vec))
358
+DEF(shli_vec, 1, 1, 1, IMPLVEC)
359
+DEF(shri_vec, 1, 1, 1, IMPLVEC)
360
+DEF(sari_vec, 1, 1, 1, IMPLVEC)
361
+DEF(rotli_vec, 1, 1, 1, IMPLVEC)
362
363
-DEF(shls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
364
-DEF(shrs_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
365
-DEF(sars_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shs_vec))
366
-DEF(rotls_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rots_vec))
367
+DEF(shls_vec, 1, 2, 0, IMPLVEC)
368
+DEF(shrs_vec, 1, 2, 0, IMPLVEC)
369
+DEF(sars_vec, 1, 2, 0, IMPLVEC)
370
+DEF(rotls_vec, 1, 2, 0, IMPLVEC)
371
372
-DEF(shlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
373
-DEF(shrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
374
-DEF(sarv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_shv_vec))
375
-DEF(rotlv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec))
376
-DEF(rotrv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_rotv_vec))
377
+DEF(shlv_vec, 1, 2, 0, IMPLVEC)
378
+DEF(shrv_vec, 1, 2, 0, IMPLVEC)
379
+DEF(sarv_vec, 1, 2, 0, IMPLVEC)
380
+DEF(rotlv_vec, 1, 2, 0, IMPLVEC)
381
+DEF(rotrv_vec, 1, 2, 0, IMPLVEC)
382
383
DEF(cmp_vec, 1, 2, 1, IMPLVEC)
384
385
-DEF(bitsel_vec, 1, 3, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_bitsel_vec))
386
-DEF(cmpsel_vec, 1, 4, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_cmpsel_vec))
387
+DEF(bitsel_vec, 1, 3, 0, IMPLVEC)
388
+DEF(cmpsel_vec, 1, 4, 1, IMPLVEC)
389
390
DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT)
391
392
#include "tcg-target-opc.h.inc"
393
394
#undef DATA64_ARGS
395
-#undef IMPL
396
-#undef IMPL64
397
#undef IMPLVEC
398
#undef DEF
399
--
400
2.43.0
401
402
diff view generated by jsdifflib
New patch
1
1
This is now a direct replacement.
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
include/tcg/tcg-opc.h | 89 +++++++++++++++-----------------
7
tcg/aarch64/tcg-target-opc.h.inc | 4 +-
8
tcg/arm/tcg-target-opc.h.inc | 6 +--
9
tcg/i386/tcg-target-opc.h.inc | 22 ++++----
10
tcg/ppc/tcg-target-opc.h.inc | 12 ++---
11
tcg/s390x/tcg-target-opc.h.inc | 6 +--
12
6 files changed, 68 insertions(+), 71 deletions(-)
13
14
diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/tcg/tcg-opc.h
17
+++ b/include/tcg/tcg-opc.h
18
@@ -XXX,XX +XXX,XX @@ DEF(qemu_st_a64_i128, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
19
20
/* Host vector support. */
21
22
-#define IMPLVEC TCG_OPF_VECTOR
23
-
24
DEF(mov_vec, 1, 1, 0, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT)
25
26
-DEF(dup_vec, 1, 1, 0, IMPLVEC)
27
-DEF(dup2_vec, 1, 2, 0, IMPLVEC)
28
+DEF(dup_vec, 1, 1, 0, TCG_OPF_VECTOR)
29
+DEF(dup2_vec, 1, 2, 0, TCG_OPF_VECTOR)
30
31
-DEF(ld_vec, 1, 1, 1, IMPLVEC)
32
-DEF(st_vec, 0, 2, 1, IMPLVEC)
33
-DEF(dupm_vec, 1, 1, 1, IMPLVEC)
34
+DEF(ld_vec, 1, 1, 1, TCG_OPF_VECTOR)
35
+DEF(st_vec, 0, 2, 1, TCG_OPF_VECTOR)
36
+DEF(dupm_vec, 1, 1, 1, TCG_OPF_VECTOR)
37
38
-DEF(add_vec, 1, 2, 0, IMPLVEC)
39
-DEF(sub_vec, 1, 2, 0, IMPLVEC)
40
-DEF(mul_vec, 1, 2, 0, IMPLVEC)
41
-DEF(neg_vec, 1, 1, 0, IMPLVEC)
42
-DEF(abs_vec, 1, 1, 0, IMPLVEC)
43
-DEF(ssadd_vec, 1, 2, 0, IMPLVEC)
44
-DEF(usadd_vec, 1, 2, 0, IMPLVEC)
45
-DEF(sssub_vec, 1, 2, 0, IMPLVEC)
46
-DEF(ussub_vec, 1, 2, 0, IMPLVEC)
47
-DEF(smin_vec, 1, 2, 0, IMPLVEC)
48
-DEF(umin_vec, 1, 2, 0, IMPLVEC)
49
-DEF(smax_vec, 1, 2, 0, IMPLVEC)
50
-DEF(umax_vec, 1, 2, 0, IMPLVEC)
51
+DEF(add_vec, 1, 2, 0, TCG_OPF_VECTOR)
52
+DEF(sub_vec, 1, 2, 0, TCG_OPF_VECTOR)
53
+DEF(mul_vec, 1, 2, 0, TCG_OPF_VECTOR)
54
+DEF(neg_vec, 1, 1, 0, TCG_OPF_VECTOR)
55
+DEF(abs_vec, 1, 1, 0, TCG_OPF_VECTOR)
56
+DEF(ssadd_vec, 1, 2, 0, TCG_OPF_VECTOR)
57
+DEF(usadd_vec, 1, 2, 0, TCG_OPF_VECTOR)
58
+DEF(sssub_vec, 1, 2, 0, TCG_OPF_VECTOR)
59
+DEF(ussub_vec, 1, 2, 0, TCG_OPF_VECTOR)
60
+DEF(smin_vec, 1, 2, 0, TCG_OPF_VECTOR)
61
+DEF(umin_vec, 1, 2, 0, TCG_OPF_VECTOR)
62
+DEF(smax_vec, 1, 2, 0, TCG_OPF_VECTOR)
63
+DEF(umax_vec, 1, 2, 0, TCG_OPF_VECTOR)
64
65
-DEF(and_vec, 1, 2, 0, IMPLVEC)
66
-DEF(or_vec, 1, 2, 0, IMPLVEC)
67
-DEF(xor_vec, 1, 2, 0, IMPLVEC)
68
-DEF(andc_vec, 1, 2, 0, IMPLVEC)
69
-DEF(orc_vec, 1, 2, 0, IMPLVEC)
70
-DEF(nand_vec, 1, 2, 0, IMPLVEC)
71
-DEF(nor_vec, 1, 2, 0, IMPLVEC)
72
-DEF(eqv_vec, 1, 2, 0, IMPLVEC)
73
-DEF(not_vec, 1, 1, 0, IMPLVEC)
74
+DEF(and_vec, 1, 2, 0, TCG_OPF_VECTOR)
75
+DEF(or_vec, 1, 2, 0, TCG_OPF_VECTOR)
76
+DEF(xor_vec, 1, 2, 0, TCG_OPF_VECTOR)
77
+DEF(andc_vec, 1, 2, 0, TCG_OPF_VECTOR)
78
+DEF(orc_vec, 1, 2, 0, TCG_OPF_VECTOR)
79
+DEF(nand_vec, 1, 2, 0, TCG_OPF_VECTOR)
80
+DEF(nor_vec, 1, 2, 0, TCG_OPF_VECTOR)
81
+DEF(eqv_vec, 1, 2, 0, TCG_OPF_VECTOR)
82
+DEF(not_vec, 1, 1, 0, TCG_OPF_VECTOR)
83
84
-DEF(shli_vec, 1, 1, 1, IMPLVEC)
85
-DEF(shri_vec, 1, 1, 1, IMPLVEC)
86
-DEF(sari_vec, 1, 1, 1, IMPLVEC)
87
-DEF(rotli_vec, 1, 1, 1, IMPLVEC)
88
+DEF(shli_vec, 1, 1, 1, TCG_OPF_VECTOR)
89
+DEF(shri_vec, 1, 1, 1, TCG_OPF_VECTOR)
90
+DEF(sari_vec, 1, 1, 1, TCG_OPF_VECTOR)
91
+DEF(rotli_vec, 1, 1, 1, TCG_OPF_VECTOR)
92
93
-DEF(shls_vec, 1, 2, 0, IMPLVEC)
94
-DEF(shrs_vec, 1, 2, 0, IMPLVEC)
95
-DEF(sars_vec, 1, 2, 0, IMPLVEC)
96
-DEF(rotls_vec, 1, 2, 0, IMPLVEC)
97
+DEF(shls_vec, 1, 2, 0, TCG_OPF_VECTOR)
98
+DEF(shrs_vec, 1, 2, 0, TCG_OPF_VECTOR)
99
+DEF(sars_vec, 1, 2, 0, TCG_OPF_VECTOR)
100
+DEF(rotls_vec, 1, 2, 0, TCG_OPF_VECTOR)
101
102
-DEF(shlv_vec, 1, 2, 0, IMPLVEC)
103
-DEF(shrv_vec, 1, 2, 0, IMPLVEC)
104
-DEF(sarv_vec, 1, 2, 0, IMPLVEC)
105
-DEF(rotlv_vec, 1, 2, 0, IMPLVEC)
106
-DEF(rotrv_vec, 1, 2, 0, IMPLVEC)
107
+DEF(shlv_vec, 1, 2, 0, TCG_OPF_VECTOR)
108
+DEF(shrv_vec, 1, 2, 0, TCG_OPF_VECTOR)
109
+DEF(sarv_vec, 1, 2, 0, TCG_OPF_VECTOR)
110
+DEF(rotlv_vec, 1, 2, 0, TCG_OPF_VECTOR)
111
+DEF(rotrv_vec, 1, 2, 0, TCG_OPF_VECTOR)
112
113
-DEF(cmp_vec, 1, 2, 1, IMPLVEC)
114
+DEF(cmp_vec, 1, 2, 1, TCG_OPF_VECTOR)
115
116
-DEF(bitsel_vec, 1, 3, 0, IMPLVEC)
117
-DEF(cmpsel_vec, 1, 4, 1, IMPLVEC)
118
+DEF(bitsel_vec, 1, 3, 0, TCG_OPF_VECTOR)
119
+DEF(cmpsel_vec, 1, 4, 1, TCG_OPF_VECTOR)
120
121
DEF(last_generic, 0, 0, 0, TCG_OPF_NOT_PRESENT)
122
123
#include "tcg-target-opc.h.inc"
124
125
#undef DATA64_ARGS
126
-#undef IMPLVEC
127
#undef DEF
128
diff --git a/tcg/aarch64/tcg-target-opc.h.inc b/tcg/aarch64/tcg-target-opc.h.inc
129
index XXXXXXX..XXXXXXX 100644
130
--- a/tcg/aarch64/tcg-target-opc.h.inc
131
+++ b/tcg/aarch64/tcg-target-opc.h.inc
132
@@ -XXX,XX +XXX,XX @@
133
* consider these to be UNSPEC with names.
134
*/
135
136
-DEF(aa64_sshl_vec, 1, 2, 0, IMPLVEC)
137
-DEF(aa64_sli_vec, 1, 2, 1, IMPLVEC)
138
+DEF(aa64_sshl_vec, 1, 2, 0, TCG_OPF_VECTOR)
139
+DEF(aa64_sli_vec, 1, 2, 1, TCG_OPF_VECTOR)
140
diff --git a/tcg/arm/tcg-target-opc.h.inc b/tcg/arm/tcg-target-opc.h.inc
141
index XXXXXXX..XXXXXXX 100644
142
--- a/tcg/arm/tcg-target-opc.h.inc
143
+++ b/tcg/arm/tcg-target-opc.h.inc
144
@@ -XXX,XX +XXX,XX @@
145
* consider these to be UNSPEC with names.
146
*/
147
148
-DEF(arm_sli_vec, 1, 2, 1, IMPLVEC)
149
-DEF(arm_sshl_vec, 1, 2, 0, IMPLVEC)
150
-DEF(arm_ushl_vec, 1, 2, 0, IMPLVEC)
151
+DEF(arm_sli_vec, 1, 2, 1, TCG_OPF_VECTOR)
152
+DEF(arm_sshl_vec, 1, 2, 0, TCG_OPF_VECTOR)
153
+DEF(arm_ushl_vec, 1, 2, 0, TCG_OPF_VECTOR)
154
diff --git a/tcg/i386/tcg-target-opc.h.inc b/tcg/i386/tcg-target-opc.h.inc
155
index XXXXXXX..XXXXXXX 100644
156
--- a/tcg/i386/tcg-target-opc.h.inc
157
+++ b/tcg/i386/tcg-target-opc.h.inc
158
@@ -XXX,XX +XXX,XX @@
159
* consider these to be UNSPEC with names.
160
*/
161
162
-DEF(x86_shufps_vec, 1, 2, 1, IMPLVEC)
163
-DEF(x86_blend_vec, 1, 2, 1, IMPLVEC)
164
-DEF(x86_packss_vec, 1, 2, 0, IMPLVEC)
165
-DEF(x86_packus_vec, 1, 2, 0, IMPLVEC)
166
-DEF(x86_psrldq_vec, 1, 1, 1, IMPLVEC)
167
-DEF(x86_vperm2i128_vec, 1, 2, 1, IMPLVEC)
168
-DEF(x86_punpckl_vec, 1, 2, 0, IMPLVEC)
169
-DEF(x86_punpckh_vec, 1, 2, 0, IMPLVEC)
170
-DEF(x86_vpshldi_vec, 1, 2, 1, IMPLVEC)
171
-DEF(x86_vpshldv_vec, 1, 3, 0, IMPLVEC)
172
-DEF(x86_vpshrdv_vec, 1, 3, 0, IMPLVEC)
173
+DEF(x86_shufps_vec, 1, 2, 1, TCG_OPF_VECTOR)
174
+DEF(x86_blend_vec, 1, 2, 1, TCG_OPF_VECTOR)
175
+DEF(x86_packss_vec, 1, 2, 0, TCG_OPF_VECTOR)
176
+DEF(x86_packus_vec, 1, 2, 0, TCG_OPF_VECTOR)
177
+DEF(x86_psrldq_vec, 1, 1, 1, TCG_OPF_VECTOR)
178
+DEF(x86_vperm2i128_vec, 1, 2, 1, TCG_OPF_VECTOR)
179
+DEF(x86_punpckl_vec, 1, 2, 0, TCG_OPF_VECTOR)
180
+DEF(x86_punpckh_vec, 1, 2, 0, TCG_OPF_VECTOR)
181
+DEF(x86_vpshldi_vec, 1, 2, 1, TCG_OPF_VECTOR)
182
+DEF(x86_vpshldv_vec, 1, 3, 0, TCG_OPF_VECTOR)
183
+DEF(x86_vpshrdv_vec, 1, 3, 0, TCG_OPF_VECTOR)
184
diff --git a/tcg/ppc/tcg-target-opc.h.inc b/tcg/ppc/tcg-target-opc.h.inc
185
index XXXXXXX..XXXXXXX 100644
186
--- a/tcg/ppc/tcg-target-opc.h.inc
187
+++ b/tcg/ppc/tcg-target-opc.h.inc
188
@@ -XXX,XX +XXX,XX @@
189
* consider these to be UNSPEC with names.
190
*/
191
192
-DEF(ppc_mrgh_vec, 1, 2, 0, IMPLVEC)
193
-DEF(ppc_mrgl_vec, 1, 2, 0, IMPLVEC)
194
-DEF(ppc_msum_vec, 1, 3, 0, IMPLVEC)
195
-DEF(ppc_muleu_vec, 1, 2, 0, IMPLVEC)
196
-DEF(ppc_mulou_vec, 1, 2, 0, IMPLVEC)
197
-DEF(ppc_pkum_vec, 1, 2, 0, IMPLVEC)
198
+DEF(ppc_mrgh_vec, 1, 2, 0, TCG_OPF_VECTOR)
199
+DEF(ppc_mrgl_vec, 1, 2, 0, TCG_OPF_VECTOR)
200
+DEF(ppc_msum_vec, 1, 3, 0, TCG_OPF_VECTOR)
201
+DEF(ppc_muleu_vec, 1, 2, 0, TCG_OPF_VECTOR)
202
+DEF(ppc_mulou_vec, 1, 2, 0, TCG_OPF_VECTOR)
203
+DEF(ppc_pkum_vec, 1, 2, 0, TCG_OPF_VECTOR)
204
diff --git a/tcg/s390x/tcg-target-opc.h.inc b/tcg/s390x/tcg-target-opc.h.inc
205
index XXXXXXX..XXXXXXX 100644
206
--- a/tcg/s390x/tcg-target-opc.h.inc
207
+++ b/tcg/s390x/tcg-target-opc.h.inc
208
@@ -XXX,XX +XXX,XX @@
209
* emitted by tcg_expand_vec_op. For those familiar with GCC internals,
210
* consider these to be UNSPEC with names.
211
*/
212
-DEF(s390_vuph_vec, 1, 1, 0, IMPLVEC)
213
-DEF(s390_vupl_vec, 1, 1, 0, IMPLVEC)
214
-DEF(s390_vpks_vec, 1, 2, 0, IMPLVEC)
215
+DEF(s390_vuph_vec, 1, 1, 0, TCG_OPF_VECTOR)
216
+DEF(s390_vupl_vec, 1, 1, 0, TCG_OPF_VECTOR)
217
+DEF(s390_vpks_vec, 1, 2, 0, TCG_OPF_VECTOR)
218
--
219
2.43.0
220
221
diff view generated by jsdifflib
New patch
1
We always provide bswap subroutines, whether they are optimized
2
using mips32r2 when available or not.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/mips/tcg-target-has.h | 8 ++++----
8
1 file changed, 4 insertions(+), 4 deletions(-)
9
10
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/mips/tcg-target-has.h
13
+++ b/tcg/mips/tcg-target-has.h
14
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
15
#define TCG_TARGET_HAS_muls2_i32 (!use_mips32r6_instructions)
16
#define TCG_TARGET_HAS_muluh_i32 1
17
#define TCG_TARGET_HAS_mulsh_i32 1
18
+#define TCG_TARGET_HAS_bswap16_i32 1
19
#define TCG_TARGET_HAS_bswap32_i32 1
20
#define TCG_TARGET_HAS_negsetcond_i32 0
21
22
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
23
#endif
24
25
/* optional instructions detected at runtime */
26
-#define TCG_TARGET_HAS_bswap16_i32 use_mips32r2_instructions
27
#define TCG_TARGET_HAS_deposit_i32 use_mips32r2_instructions
28
#define TCG_TARGET_HAS_extract_i32 use_mips32r2_instructions
29
#define TCG_TARGET_HAS_sextract_i32 0
30
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
31
#define TCG_TARGET_HAS_qemu_st8_i32 0
32
33
#if TCG_TARGET_REG_BITS == 64
34
-#define TCG_TARGET_HAS_bswap16_i64 use_mips32r2_instructions
35
-#define TCG_TARGET_HAS_bswap32_i64 use_mips32r2_instructions
36
-#define TCG_TARGET_HAS_bswap64_i64 use_mips32r2_instructions
37
+#define TCG_TARGET_HAS_bswap16_i64 1
38
+#define TCG_TARGET_HAS_bswap32_i64 1
39
+#define TCG_TARGET_HAS_bswap64_i64 1
40
#define TCG_TARGET_HAS_deposit_i64 use_mips32r2_instructions
41
#define TCG_TARGET_HAS_extract_i64 use_mips32r2_instructions
42
#define TCG_TARGET_HAS_sextract_i64 0
43
--
44
2.43.0
45
46
diff view generated by jsdifflib
1
Bool is more appropriate type for the alloc parameter.
1
When we generalize {s}extract_i32, we'll lose the
2
specific register constraints on ext8u and ext8s.
3
It's just as easy to emit a couple of insns instead.
2
4
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
---
7
accel/tcg/translate-all.c | 14 +++++++-------
8
tcg/i386/tcg-target.c.inc | 23 +++++++++++++++++++----
8
1 file changed, 7 insertions(+), 7 deletions(-)
9
1 file changed, 19 insertions(+), 4 deletions(-)
9
10
10
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
11
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
11
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
12
--- a/accel/tcg/translate-all.c
13
--- a/tcg/i386/tcg-target.c.inc
13
+++ b/accel/tcg/translate-all.c
14
+++ b/tcg/i386/tcg-target.c.inc
14
@@ -XXX,XX +XXX,XX @@ void page_init(void)
15
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_rolw_8(TCGContext *s, int reg)
15
#endif
16
17
static void tcg_out_ext8u(TCGContext *s, TCGReg dest, TCGReg src)
18
{
19
- /* movzbl */
20
- tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
21
+ if (TCG_TARGET_REG_BITS == 32 && src >= 4) {
22
+ tcg_out_mov(s, TCG_TYPE_I32, dest, src);
23
+ if (dest >= 4) {
24
+ tcg_out_modrm(s, OPC_ARITH_EvIz, ARITH_AND, dest);
25
+ tcg_out32(s, 0xff);
26
+ return;
27
+ }
28
+ src = dest;
29
+ }
30
tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src);
16
}
31
}
17
32
18
-static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
33
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
19
+static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
20
{
34
{
21
PageDesc *pd;
35
int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
22
void **lp;
36
- /* movsbl */
23
@@ -XXX,XX +XXX,XX @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
37
- tcg_debug_assert(src < 4 || TCG_TARGET_REG_BITS == 64);
24
38
+
25
static inline PageDesc *page_find(tb_page_addr_t index)
39
+ if (TCG_TARGET_REG_BITS == 32 && src >= 4) {
26
{
40
+ tcg_out_mov(s, TCG_TYPE_I32, dest, src);
27
- return page_find_alloc(index, 0);
41
+ if (dest >= 4) {
28
+ return page_find_alloc(index, false);
42
+ tcg_out_shifti(s, SHIFT_SHL, dest, 24);
43
+ tcg_out_shifti(s, SHIFT_SAR, dest, 24);
44
+ return;
45
+ }
46
+ src = dest;
47
+ }
48
tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src);
29
}
49
}
30
50
31
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
32
- PageDesc **ret_p2, tb_page_addr_t phys2, int alloc);
33
+ PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc);
34
35
/* In user-mode page locks aren't used; mmap_lock is enough */
36
#ifdef CONFIG_USER_ONLY
37
@@ -XXX,XX +XXX,XX @@ static inline void page_unlock(PageDesc *pd)
38
/* lock the page(s) of a TB in the correct acquisition order */
39
static inline void page_lock_tb(const TranslationBlock *tb)
40
{
41
- page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], 0);
42
+ page_lock_pair(NULL, tb->page_addr[0], NULL, tb->page_addr[1], false);
43
}
44
45
static inline void page_unlock_tb(const TranslationBlock *tb)
46
@@ -XXX,XX +XXX,XX @@ void page_collection_unlock(struct page_collection *set)
47
#endif /* !CONFIG_USER_ONLY */
48
49
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
50
- PageDesc **ret_p2, tb_page_addr_t phys2, int alloc)
51
+ PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
52
{
53
PageDesc *p1, *p2;
54
tb_page_addr_t page1;
55
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
56
* Note that inserting into the hash table first isn't an option, since
57
* we can only insert TBs that are fully initialized.
58
*/
59
- page_lock_pair(&p, phys_pc, &p2, phys_page2, 1);
60
+ page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
61
tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
62
if (p2) {
63
tb_page_add(p2, tb, 1, phys_page2);
64
@@ -XXX,XX +XXX,XX @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
65
for (addr = start, len = end - start;
66
len != 0;
67
len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
68
- PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
69
+ PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, true);
70
71
/* If the write protection bit is set, then we invalidate
72
the code inside. */
73
--
51
--
74
2.34.1
52
2.43.0
75
53
76
54
diff view generated by jsdifflib
New patch
1
1
Accept byte and word extensions with the extract opcodes.
2
This is preparatory to removing the specialized extracts.
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/i386/tcg-target-has.h | 49 +++++++++++++++++++++++++++----
8
tcg/tcg-has.h | 12 +++++---
9
tcg/optimize.c | 8 +++--
10
tcg/tcg-op.c | 12 +++-----
11
tcg/i386/tcg-target.c.inc | 62 +++++++++++++++++++++++++++++----------
12
5 files changed, 107 insertions(+), 36 deletions(-)
13
14
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/tcg/i386/tcg-target-has.h
17
+++ b/tcg/i386/tcg-target-has.h
18
@@ -XXX,XX +XXX,XX @@
19
#define TCG_TARGET_HAS_ctpop_i64 have_popcnt
20
#define TCG_TARGET_HAS_deposit_i64 1
21
#define TCG_TARGET_HAS_extract_i64 1
22
-#define TCG_TARGET_HAS_sextract_i64 0
23
+#define TCG_TARGET_HAS_sextract_i64 1
24
#define TCG_TARGET_HAS_extract2_i64 1
25
#define TCG_TARGET_HAS_negsetcond_i64 1
26
#define TCG_TARGET_HAS_add2_i64 1
27
@@ -XXX,XX +XXX,XX @@
28
(TCG_TARGET_REG_BITS == 32 && (ofs) == 8 && (len) == 8))
29
#define TCG_TARGET_deposit_i64_valid TCG_TARGET_deposit_i32_valid
30
31
-/* Check for the possibility of high-byte extraction and, for 64-bit,
32
- zero-extending 32-bit right-shift. */
33
-#define TCG_TARGET_extract_i32_valid(ofs, len) ((ofs) == 8 && (len) == 8)
34
-#define TCG_TARGET_extract_i64_valid(ofs, len) \
35
- (((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32)
36
+/*
37
+ * Check for the possibility of low byte/word extraction, high-byte extraction
38
+ * and zero-extending 32-bit right-shift.
39
+ *
40
+ * We cannot sign-extend from high byte to 64-bits without using the
41
+ * REX prefix that explicitly excludes access to the high-byte registers.
42
+ */
43
+static inline bool
44
+tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
45
+{
46
+ switch (ofs) {
47
+ case 0:
48
+ switch (len) {
49
+ case 8:
50
+ case 16:
51
+ return true;
52
+ case 32:
53
+ return type == TCG_TYPE_I64;
54
+ }
55
+ return false;
56
+ case 8:
57
+ return len == 8 && type == TCG_TYPE_I32;
58
+ }
59
+ return false;
60
+}
61
+#define TCG_TARGET_sextract_valid tcg_target_sextract_valid
62
+
63
+static inline bool
64
+tcg_target_extract_valid(TCGType type, unsigned ofs, unsigned len)
65
+{
66
+ if (type == TCG_TYPE_I64 && ofs + len == 32) {
67
+ return true;
68
+ }
69
+ switch (ofs) {
70
+ case 0:
71
+ return len == 8 || len == 16;
72
+ case 8:
73
+ return len == 8;
74
+ }
75
+ return false;
76
+}
77
+#define TCG_TARGET_extract_valid tcg_target_extract_valid
78
79
#endif
80
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
81
index XXXXXXX..XXXXXXX 100644
82
--- a/tcg/tcg-has.h
83
+++ b/tcg/tcg-has.h
84
@@ -XXX,XX +XXX,XX @@
85
#ifndef TCG_TARGET_deposit_i64_valid
86
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
87
#endif
88
-#ifndef TCG_TARGET_extract_i32_valid
89
-#define TCG_TARGET_extract_i32_valid(ofs, len) 1
90
+#ifndef TCG_TARGET_extract_valid
91
+#define TCG_TARGET_extract_valid(type, ofs, len) \
92
+ ((type) == TCG_TYPE_I32 ? TCG_TARGET_HAS_extract_i32 \
93
+ : TCG_TARGET_HAS_extract_i64)
94
#endif
95
-#ifndef TCG_TARGET_extract_i64_valid
96
-#define TCG_TARGET_extract_i64_valid(ofs, len) 1
97
+#ifndef TCG_TARGET_sextract_valid
98
+#define TCG_TARGET_sextract_valid(type, ofs, len) \
99
+ ((type) == TCG_TYPE_I32 ? TCG_TARGET_HAS_sextract_i32 \
100
+ : TCG_TARGET_HAS_sextract_i64)
101
#endif
102
103
/* Only one of DIV or DIV2 should be defined. */
104
diff --git a/tcg/optimize.c b/tcg/optimize.c
105
index XXXXXXX..XXXXXXX 100644
106
--- a/tcg/optimize.c
107
+++ b/tcg/optimize.c
108
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
109
xor_opc = INDEX_op_xor_i32;
110
shr_opc = INDEX_op_shr_i32;
111
neg_opc = INDEX_op_neg_i32;
112
- if (TCG_TARGET_extract_i32_valid(sh, 1)) {
113
+ if (TCG_TARGET_extract_valid(TCG_TYPE_I32, sh, 1)) {
114
uext_opc = TCG_TARGET_HAS_extract_i32 ? INDEX_op_extract_i32 : 0;
115
+ }
116
+ if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, sh, 1)) {
117
sext_opc = TCG_TARGET_HAS_sextract_i32 ? INDEX_op_sextract_i32 : 0;
118
}
119
break;
120
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
121
xor_opc = INDEX_op_xor_i64;
122
shr_opc = INDEX_op_shr_i64;
123
neg_opc = INDEX_op_neg_i64;
124
- if (TCG_TARGET_extract_i64_valid(sh, 1)) {
125
+ if (TCG_TARGET_extract_valid(TCG_TYPE_I64, sh, 1)) {
126
uext_opc = TCG_TARGET_HAS_extract_i64 ? INDEX_op_extract_i64 : 0;
127
+ }
128
+ if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, sh, 1)) {
129
sext_opc = TCG_TARGET_HAS_sextract_i64 ? INDEX_op_sextract_i64 : 0;
130
}
131
break;
132
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
133
index XXXXXXX..XXXXXXX 100644
134
--- a/tcg/tcg-op.c
135
+++ b/tcg/tcg-op.c
136
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg,
137
return;
138
}
139
140
- if (TCG_TARGET_HAS_extract_i32
141
- && TCG_TARGET_extract_i32_valid(ofs, len)) {
142
+ if (TCG_TARGET_extract_valid(TCG_TYPE_I32, ofs, len)) {
143
tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, ofs, len);
144
return;
145
}
146
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg,
147
}
148
}
149
150
- if (TCG_TARGET_HAS_sextract_i32
151
- && TCG_TARGET_extract_i32_valid(ofs, len)) {
152
+ if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, ofs, len)) {
153
tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, arg, ofs, len);
154
return;
155
}
156
@@ -XXX,XX +XXX,XX @@ void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg,
157
goto do_shift_and;
158
}
159
160
- if (TCG_TARGET_HAS_extract_i64
161
- && TCG_TARGET_extract_i64_valid(ofs, len)) {
162
+ if (TCG_TARGET_extract_valid(TCG_TYPE_I64, ofs, len)) {
163
tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, ofs, len);
164
return;
165
}
166
@@ -XXX,XX +XXX,XX @@ void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg,
167
return;
168
}
169
170
- if (TCG_TARGET_HAS_sextract_i64
171
- && TCG_TARGET_extract_i64_valid(ofs, len)) {
172
+ if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, ofs, len)) {
173
tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, arg, ofs, len);
174
return;
175
}
176
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
177
index XXXXXXX..XXXXXXX 100644
178
--- a/tcg/i386/tcg-target.c.inc
179
+++ b/tcg/i386/tcg-target.c.inc
180
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
181
182
case INDEX_op_extract_i64:
183
if (a2 + args[3] == 32) {
184
+ if (a2 == 0) {
185
+ tcg_out_ext32u(s, a0, a1);
186
+ break;
187
+ }
188
/* This is a 32-bit zero-extending right shift. */
189
tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
190
tcg_out_shifti(s, SHIFT_SHR, a0, a2);
191
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
192
}
193
/* FALLTHRU */
194
case INDEX_op_extract_i32:
195
- /* On the off-chance that we can use the high-byte registers.
196
- Otherwise we emit the same ext16 + shift pattern that we
197
- would have gotten from the normal tcg-op.c expansion. */
198
- tcg_debug_assert(a2 == 8 && args[3] == 8);
199
- if (a1 < 4 && a0 < 8) {
200
- tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4);
201
- } else {
202
+ if (a2 == 0 && args[3] == 8) {
203
+ tcg_out_ext8u(s, a0, a1);
204
+ } else if (a2 == 0 && args[3] == 16) {
205
tcg_out_ext16u(s, a0, a1);
206
- tcg_out_shifti(s, SHIFT_SHR, a0, 8);
207
+ } else if (a2 == 8 && args[3] == 8) {
208
+ /*
209
+ * On the off-chance that we can use the high-byte registers.
210
+ * Otherwise we emit the same ext16 + shift pattern that we
211
+ * would have gotten from the normal tcg-op.c expansion.
212
+ */
213
+ if (a1 < 4 && a0 < 8) {
214
+ tcg_out_modrm(s, OPC_MOVZBL, a0, a1 + 4);
215
+ } else {
216
+ tcg_out_ext16u(s, a0, a1);
217
+ tcg_out_shifti(s, SHIFT_SHR, a0, 8);
218
+ }
219
+ } else {
220
+ g_assert_not_reached();
221
+ }
222
+ break;
223
+
224
+ case INDEX_op_sextract_i64:
225
+ if (a2 == 0 && args[3] == 8) {
226
+ tcg_out_ext8s(s, TCG_TYPE_I64, a0, a1);
227
+ } else if (a2 == 0 && args[3] == 16) {
228
+ tcg_out_ext16s(s, TCG_TYPE_I64, a0, a1);
229
+ } else if (a2 == 0 && args[3] == 32) {
230
+ tcg_out_ext32s(s, a0, a1);
231
+ } else {
232
+ g_assert_not_reached();
233
}
234
break;
235
236
case INDEX_op_sextract_i32:
237
- /* We don't implement sextract_i64, as we cannot sign-extend to
238
- 64-bits without using the REX prefix that explicitly excludes
239
- access to the high-byte registers. */
240
- tcg_debug_assert(a2 == 8 && args[3] == 8);
241
- if (a1 < 4 && a0 < 8) {
242
- tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4);
243
- } else {
244
+ if (a2 == 0 && args[3] == 8) {
245
+ tcg_out_ext8s(s, TCG_TYPE_I32, a0, a1);
246
+ } else if (a2 == 0 && args[3] == 16) {
247
tcg_out_ext16s(s, TCG_TYPE_I32, a0, a1);
248
- tcg_out_shifti(s, SHIFT_SAR, a0, 8);
249
+ } else if (a2 == 8 && args[3] == 8) {
250
+ if (a1 < 4 && a0 < 8) {
251
+ tcg_out_modrm(s, OPC_MOVSBL, a0, a1 + 4);
252
+ } else {
253
+ tcg_out_ext16s(s, TCG_TYPE_I32, a0, a1);
254
+ tcg_out_shifti(s, SHIFT_SAR, a0, 8);
255
+ }
256
+ } else {
257
+ g_assert_not_reached();
258
}
259
break;
260
261
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
262
case INDEX_op_extract_i32:
263
case INDEX_op_extract_i64:
264
case INDEX_op_sextract_i32:
265
+ case INDEX_op_sextract_i64:
266
case INDEX_op_ctpop_i32:
267
case INDEX_op_ctpop_i64:
268
return C_O1_I1(r, r);
269
--
270
2.43.0
271
272
diff view generated by jsdifflib
New patch
1
Trivially mirrors TCG_TARGET_HAS_{s}extract_*.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/aarch64/tcg-target-has.h | 3 +++
7
1 file changed, 3 insertions(+)
8
9
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/aarch64/tcg-target-has.h
12
+++ b/tcg/aarch64/tcg-target-has.h
13
@@ -XXX,XX +XXX,XX @@
14
#define TCG_TARGET_HAS_cmpsel_vec 0
15
#define TCG_TARGET_HAS_tst_vec 1
16
17
+#define TCG_TARGET_extract_valid(type, ofs, len) 1
18
+#define TCG_TARGET_sextract_valid(type, ofs, len) 1
19
+
20
#endif
21
--
22
2.43.0
23
24
diff view generated by jsdifflib
New patch
1
We're about to change canonicalization of masks as extract
2
instead of and. Retain the andi expansion here.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/aarch64/tcg-target.c.inc | 7 ++++++-
8
1 file changed, 6 insertions(+), 1 deletion(-)
9
10
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/aarch64/tcg-target.c.inc
13
+++ b/tcg/aarch64/tcg-target.c.inc
14
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType ext,
15
16
case INDEX_op_extract_i64:
17
case INDEX_op_extract_i32:
18
- tcg_out_ubfm(s, ext, a0, a1, a2, a2 + args[3] - 1);
19
+ if (a2 == 0) {
20
+ uint64_t mask = MAKE_64BIT_MASK(0, args[3]);
21
+ tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, mask);
22
+ } else {
23
+ tcg_out_ubfm(s, ext, a0, a1, a2, a2 + args[3] - 1);
24
+ }
25
break;
26
27
case INDEX_op_sextract_i64:
28
--
29
2.43.0
30
31
diff view generated by jsdifflib
1
From: Leandro Lupori <leandro.lupori@eldorado.org.br>
1
The armv6 uxt and sxt opcodes have a 2-bit rotate field
2
which supports extractions from ofs = {0,8,16,24}.
3
Special case ofs = 0, len <= 8 as AND.
2
4
3
PowerPC64 processors handle direct branches better than indirect
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
ones, resulting in less stalled cycles and branch misses.
5
6
However, PPC's tb_target_set_jmp_target() was only using direct
7
branches for 16-bit jumps, while PowerPC64's unconditional branch
8
instructions are able to handle displacements of up to 26 bits.
9
To take advantage of this, now jumps whose displacements fit in
10
between 17 and 26 bits are also converted to direct branches.
11
12
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Signed-off-by: Leandro Lupori <leandro.lupori@eldorado.org.br>
14
[rth: Expanded some commentary.]
15
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
16
---
7
---
17
tcg/ppc/tcg-target.c.inc | 119 +++++++++++++++++++++++++++++----------
8
tcg/arm/tcg-target-has.h | 21 ++++++++++++++--
18
1 file changed, 88 insertions(+), 31 deletions(-)
9
tcg/arm/tcg-target.c.inc | 54 +++++++++++++++++++++++++++++++++++-----
10
2 files changed, 67 insertions(+), 8 deletions(-)
19
11
20
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
12
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
21
index XXXXXXX..XXXXXXX 100644
13
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/ppc/tcg-target.c.inc
14
--- a/tcg/arm/tcg-target-has.h
23
+++ b/tcg/ppc/tcg-target.c.inc
15
+++ b/tcg/arm/tcg-target-has.h
24
@@ -XXX,XX +XXX,XX @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
16
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
25
tcg_out32(s, insn);
17
#define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions
26
}
18
#define TCG_TARGET_HAS_ctpop_i32 0
27
19
#define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions
28
+static inline uint64_t make_pair(tcg_insn_unit i1, tcg_insn_unit i2)
20
-#define TCG_TARGET_HAS_extract_i32 use_armv7_instructions
21
-#define TCG_TARGET_HAS_sextract_i32 use_armv7_instructions
22
+#define TCG_TARGET_HAS_extract_i32 1
23
+#define TCG_TARGET_HAS_sextract_i32 1
24
#define TCG_TARGET_HAS_extract2_i32 1
25
#define TCG_TARGET_HAS_negsetcond_i32 1
26
#define TCG_TARGET_HAS_mulu2_i32 1
27
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
28
#define TCG_TARGET_HAS_cmpsel_vec 0
29
#define TCG_TARGET_HAS_tst_vec 1
30
31
+static inline bool
32
+tcg_target_extract_valid(TCGType type, unsigned ofs, unsigned len)
29
+{
33
+{
30
+ if (HOST_BIG_ENDIAN) {
34
+ if (use_armv7_instructions) {
31
+ return (uint64_t)i1 << 32 | i2;
35
+ return true; /* SBFX or UBFX */
32
+ }
36
+ }
33
+ return (uint64_t)i2 << 32 | i1;
37
+ switch (len) {
38
+ case 8: /* SXTB or UXTB */
39
+ case 16: /* SXTH or UXTH */
40
+ return (ofs % 8) == 0;
41
+ }
42
+ return false;
34
+}
43
+}
35
+
44
+
36
+static inline void ppc64_replace2(uintptr_t rx, uintptr_t rw,
45
+#define TCG_TARGET_extract_valid tcg_target_extract_valid
37
+ tcg_insn_unit i0, tcg_insn_unit i1)
46
+#define TCG_TARGET_sextract_valid tcg_target_extract_valid
38
+{
39
+#if TCG_TARGET_REG_BITS == 64
40
+ qatomic_set((uint64_t *)rw, make_pair(i0, i1));
41
+ flush_idcache_range(rx, rw, 8);
42
+#else
43
+ qemu_build_not_reached();
44
+#endif
45
+}
46
+
47
+
47
+static inline void ppc64_replace4(uintptr_t rx, uintptr_t rw,
48
#endif
48
+ tcg_insn_unit i0, tcg_insn_unit i1,
49
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
49
+ tcg_insn_unit i2, tcg_insn_unit i3)
50
index XXXXXXX..XXXXXXX 100644
50
+{
51
--- a/tcg/arm/tcg-target.c.inc
51
+ uint64_t p[2];
52
+++ b/tcg/arm/tcg-target.c.inc
52
+
53
@@ -XXX,XX +XXX,XX @@ static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
53
+ p[!HOST_BIG_ENDIAN] = make_pair(i0, i1);
54
static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
54
+ p[HOST_BIG_ENDIAN] = make_pair(i2, i3);
55
TCGReg rn, int ofs, int len)
55
+
56
+ /*
57
+ * There's no convenient way to get the compiler to allocate a pair
58
+ * of registers at an even index, so copy into r6/r7 and clobber.
59
+ */
60
+ asm("mr %%r6, %1\n\t"
61
+ "mr %%r7, %2\n\t"
62
+ "stq %%r6, %0"
63
+ : "=Q"(*(__int128 *)rw) : "r"(p[0]), "r"(p[1]) : "r6", "r7");
64
+ flush_idcache_range(rx, rw, 16);
65
+}
66
+
67
void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
68
uintptr_t jmp_rw, uintptr_t addr)
69
{
56
{
70
- if (TCG_TARGET_REG_BITS == 64) {
57
- /* ubfx */
71
- tcg_insn_unit i1, i2;
58
- tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
72
- intptr_t tb_diff = addr - tc_ptr;
59
- | (ofs << 7) | ((len - 1) << 16));
73
- intptr_t br_diff = addr - (jmp_rx + 4);
60
+ /* According to gcc, AND can be faster. */
74
- uint64_t pair;
61
+ if (ofs == 0 && len <= 8) {
75
+ tcg_insn_unit i0, i1, i2, i3;
62
+ tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn,
76
+ intptr_t tb_diff = addr - tc_ptr;
63
+ encode_imm_nofail((1 << len) - 1));
77
+ intptr_t br_diff = addr - (jmp_rx + 4);
78
+ intptr_t lo, hi;
79
80
- /* This does not exercise the range of the branch, but we do
81
- still need to be able to load the new value of TCG_REG_TB.
82
- But this does still happen quite often. */
83
- if (tb_diff == (int16_t)tb_diff) {
84
- i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
85
- i2 = B | (br_diff & 0x3fffffc);
86
- } else {
87
- intptr_t lo = (int16_t)tb_diff;
88
- intptr_t hi = (int32_t)(tb_diff - lo);
89
- assert(tb_diff == hi + lo);
90
- i1 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
91
- i2 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
92
- }
93
-#if HOST_BIG_ENDIAN
94
- pair = (uint64_t)i1 << 32 | i2;
95
-#else
96
- pair = (uint64_t)i2 << 32 | i1;
97
-#endif
98
-
99
- /* As per the enclosing if, this is ppc64. Avoid the _Static_assert
100
- within qatomic_set that would fail to build a ppc32 host. */
101
- qatomic_set__nocheck((uint64_t *)jmp_rw, pair);
102
- flush_idcache_range(jmp_rx, jmp_rw, 8);
103
- } else {
104
+ if (TCG_TARGET_REG_BITS == 32) {
105
intptr_t diff = addr - jmp_rx;
106
tcg_debug_assert(in_range_b(diff));
107
qatomic_set((uint32_t *)jmp_rw, B | (diff & 0x3fffffc));
108
flush_idcache_range(jmp_rx, jmp_rw, 4);
109
+ return;
110
}
111
+
112
+ /*
113
+ * For 16-bit displacements, we can use a single add + branch.
114
+ * This happens quite often.
115
+ */
116
+ if (tb_diff == (int16_t)tb_diff) {
117
+ i0 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
118
+ i1 = B | (br_diff & 0x3fffffc);
119
+ ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
120
+ return;
64
+ return;
121
+ }
65
+ }
122
+
66
+
123
+ lo = (int16_t)tb_diff;
67
+ if (use_armv7_instructions) {
124
+ hi = (int32_t)(tb_diff - lo);
68
+ /* ubfx */
125
+ assert(tb_diff == hi + lo);
69
+ tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
126
+ i0 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
70
+ | (ofs << 7) | ((len - 1) << 16));
127
+ i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
128
+
129
+ /*
130
+ * Without stq from 2.07, we can only update two insns,
131
+ * and those must be the ones that load the target address.
132
+ */
133
+ if (!have_isa_2_07) {
134
+ ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
135
+ return;
71
+ return;
136
+ }
72
+ }
137
+
73
+
138
+ /*
74
+ assert(ofs % 8 == 0);
139
+ * For 26-bit displacements, we can use a direct branch.
75
+ switch (len) {
140
+ * Otherwise we still need the indirect branch, which we
76
+ case 8:
141
+ * must restore after a potential direct branch write.
77
+ /* uxtb */
142
+ */
78
+ tcg_out32(s, 0x06ef0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn);
143
+ br_diff -= 4;
79
+ break;
144
+ if (in_range_b(br_diff)) {
80
+ case 16:
145
+ i2 = B | (br_diff & 0x3fffffc);
81
+ /* uxth */
146
+ i3 = NOP;
82
+ tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn);
147
+ } else {
83
+ break;
148
+ i2 = MTSPR | RS(TCG_REG_TB) | CTR;
84
+ default:
149
+ i3 = BCCTR | BO_ALWAYS;
85
+ g_assert_not_reached();
150
+ }
86
+ }
151
+ ppc64_replace4(jmp_rx, jmp_rw, i0, i1, i2, i3);
152
}
87
}
153
88
154
static void tcg_out_call_int(TCGContext *s, int lk,
89
static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
155
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
90
TCGReg rn, int ofs, int len)
156
if (s->tb_jmp_insn_offset) {
91
{
157
/* Direct jump. */
92
- /* sbfx */
158
if (TCG_TARGET_REG_BITS == 64) {
93
- tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
159
- /* Ensure the next insns are 8-byte aligned. */
94
- | (ofs << 7) | ((len - 1) << 16));
160
- if ((uintptr_t)s->code_ptr & 7) {
95
+ if (use_armv7_instructions) {
161
+ /* Ensure the next insns are 8 or 16-byte aligned. */
96
+ /* sbfx */
162
+ while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
97
+ tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
163
tcg_out32(s, NOP);
98
+ | (ofs << 7) | ((len - 1) << 16));
164
}
99
+ return;
165
s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
100
+ }
101
+
102
+ assert(ofs % 8 == 0);
103
+ switch (len) {
104
+ case 8:
105
+ /* sxtb */
106
+ tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn);
107
+ break;
108
+ case 16:
109
+ /* sxth */
110
+ tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | (ofs << 7) | rn);
111
+ break;
112
+ default:
113
+ g_assert_not_reached();
114
+ }
115
}
116
117
+
118
static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
119
TCGReg rd, TCGReg rn, int32_t offset)
120
{
166
--
121
--
167
2.34.1
122
2.43.0
123
124
diff view generated by jsdifflib
New patch
1
Accept byte and word extensions with the extract opcodes.
2
This is preparatory to removing the specialized extracts.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/loongarch64/tcg-target-has.h | 15 ++++++++++++--
8
tcg/loongarch64/tcg-target.c.inc | 34 ++++++++++++++++++++++++++++++--
9
2 files changed, 45 insertions(+), 4 deletions(-)
10
11
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/loongarch64/tcg-target-has.h
14
+++ b/tcg/loongarch64/tcg-target-has.h
15
@@ -XXX,XX +XXX,XX @@
16
#define TCG_TARGET_HAS_rot_i32 1
17
#define TCG_TARGET_HAS_deposit_i32 1
18
#define TCG_TARGET_HAS_extract_i32 1
19
-#define TCG_TARGET_HAS_sextract_i32 0
20
+#define TCG_TARGET_HAS_sextract_i32 1
21
#define TCG_TARGET_HAS_extract2_i32 0
22
#define TCG_TARGET_HAS_add2_i32 0
23
#define TCG_TARGET_HAS_sub2_i32 0
24
@@ -XXX,XX +XXX,XX @@
25
#define TCG_TARGET_HAS_rot_i64 1
26
#define TCG_TARGET_HAS_deposit_i64 1
27
#define TCG_TARGET_HAS_extract_i64 1
28
-#define TCG_TARGET_HAS_sextract_i64 0
29
+#define TCG_TARGET_HAS_sextract_i64 1
30
#define TCG_TARGET_HAS_extract2_i64 0
31
#define TCG_TARGET_HAS_extr_i64_i32 1
32
#define TCG_TARGET_HAS_ext8s_i64 1
33
@@ -XXX,XX +XXX,XX @@
34
#define TCG_TARGET_HAS_cmpsel_vec 0
35
#define TCG_TARGET_HAS_tst_vec 0
36
37
+#define TCG_TARGET_extract_valid(type, ofs, len) 1
38
+
39
+static inline bool
40
+tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
41
+{
42
+ if (type == TCG_TYPE_I64 && ofs + len == 32) {
43
+ return true;
44
+ }
45
+ return ofs == 0 && (len == 8 || len == 16);
46
+}
47
+#define TCG_TARGET_sextract_valid tcg_target_sextract_valid
48
49
#endif
50
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
51
index XXXXXXX..XXXXXXX 100644
52
--- a/tcg/loongarch64/tcg-target.c.inc
53
+++ b/tcg/loongarch64/tcg-target.c.inc
54
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
55
break;
56
57
case INDEX_op_extract_i32:
58
- tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
59
+ if (a2 == 0 && args[3] <= 12) {
60
+ tcg_out_opc_andi(s, a0, a1, (1 << args[3]) - 1);
61
+ } else {
62
+ tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
63
+ }
64
break;
65
case INDEX_op_extract_i64:
66
- tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
67
+ if (a2 == 0 && args[3] <= 12) {
68
+ tcg_out_opc_andi(s, a0, a1, (1 << args[3]) - 1);
69
+ } else {
70
+ tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
71
+ }
72
+ break;
73
+
74
+ case INDEX_op_sextract_i64:
75
+ if (a2 + args[3] == 32) {
76
+ if (a2 == 0) {
77
+ tcg_out_ext32s(s, a0, a1);
78
+ } else {
79
+ tcg_out_opc_srai_w(s, a0, a1, a2);
80
+ }
81
+ break;
82
+ }
83
+ /* FALLTHRU */
84
+ case INDEX_op_sextract_i32:
85
+ if (a2 == 0 && args[3] == 8) {
86
+ tcg_out_ext8s(s, TCG_TYPE_REG, a0, a1);
87
+ } else if (a2 == 0 && args[3] == 16) {
88
+ tcg_out_ext16s(s, TCG_TYPE_REG, a0, a1);
89
+ } else {
90
+ g_assert_not_reached();
91
+ }
92
break;
93
94
case INDEX_op_deposit_i32:
95
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
96
case INDEX_op_not_i64:
97
case INDEX_op_extract_i32:
98
case INDEX_op_extract_i64:
99
+ case INDEX_op_sextract_i32:
100
+ case INDEX_op_sextract_i64:
101
case INDEX_op_bswap16_i32:
102
case INDEX_op_bswap16_i64:
103
case INDEX_op_bswap32_i32:
104
--
105
2.43.0
106
107
diff view generated by jsdifflib
New patch
1
Accept AND, ext32u, ext32s extensions with the extract opcodes.
2
This is preparatory to removing the specialized extracts.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/mips/tcg-target-has.h | 26 ++++++++++++++++++++++----
8
tcg/mips/tcg-target.c.inc | 33 ++++++++++++++++++++++++++++++---
9
2 files changed, 52 insertions(+), 7 deletions(-)
10
11
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/mips/tcg-target-has.h
14
+++ b/tcg/mips/tcg-target-has.h
15
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
16
17
/* optional instructions detected at runtime */
18
#define TCG_TARGET_HAS_deposit_i32 use_mips32r2_instructions
19
-#define TCG_TARGET_HAS_extract_i32 use_mips32r2_instructions
20
-#define TCG_TARGET_HAS_sextract_i32 0
21
+#define TCG_TARGET_HAS_extract_i32 1
22
+#define TCG_TARGET_HAS_sextract_i32 1
23
#define TCG_TARGET_HAS_extract2_i32 0
24
#define TCG_TARGET_HAS_ext8s_i32 use_mips32r2_instructions
25
#define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions
26
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
27
#define TCG_TARGET_HAS_bswap32_i64 1
28
#define TCG_TARGET_HAS_bswap64_i64 1
29
#define TCG_TARGET_HAS_deposit_i64 use_mips32r2_instructions
30
-#define TCG_TARGET_HAS_extract_i64 use_mips32r2_instructions
31
-#define TCG_TARGET_HAS_sextract_i64 0
32
+#define TCG_TARGET_HAS_extract_i64 1
33
+#define TCG_TARGET_HAS_sextract_i64 1
34
#define TCG_TARGET_HAS_extract2_i64 0
35
#define TCG_TARGET_HAS_ext8s_i64 use_mips32r2_instructions
36
#define TCG_TARGET_HAS_ext16s_i64 use_mips32r2_instructions
37
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
38
#define TCG_TARGET_HAS_qemu_ldst_i128 0
39
#define TCG_TARGET_HAS_tst 0
40
41
+#define TCG_TARGET_extract_valid(type, ofs, len) use_mips32r2_instructions
42
+
43
+static inline bool
44
+tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
45
+{
46
+ if (ofs == 0) {
47
+ switch (len) {
48
+ case 8:
49
+ case 16:
50
+ return use_mips32r2_instructions;
51
+ case 32:
52
+ return type == TCG_TYPE_I64;
53
+ }
54
+ }
55
+ return false;
56
+}
57
+#define TCG_TARGET_sextract_valid tcg_target_sextract_valid
58
+
59
#endif
60
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
61
index XXXXXXX..XXXXXXX 100644
62
--- a/tcg/mips/tcg-target.c.inc
63
+++ b/tcg/mips/tcg-target.c.inc
64
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
65
tcg_out_opc_bf64(s, OPC_DINS, OPC_DINSM, OPC_DINSU, a0, a2,
66
args[3] + args[4] - 1, args[3]);
67
break;
68
+
69
case INDEX_op_extract_i32:
70
- tcg_out_opc_bf(s, OPC_EXT, a0, a1, args[3] - 1, a2);
71
+ if (a2 == 0 && args[3] <= 16) {
72
+ tcg_out_opc_imm(s, OPC_ANDI, a0, a1, (1 << args[3]) - 1);
73
+ } else {
74
+ tcg_out_opc_bf(s, OPC_EXT, a0, a1, args[3] - 1, a2);
75
+ }
76
break;
77
case INDEX_op_extract_i64:
78
- tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU, a0, a1,
79
- args[3] - 1, a2);
80
+ if (a2 == 0 && args[3] <= 16) {
81
+ tcg_out_opc_imm(s, OPC_ANDI, a0, a1, (1 << args[3]) - 1);
82
+ } else {
83
+ tcg_out_opc_bf64(s, OPC_DEXT, OPC_DEXTM, OPC_DEXTU,
84
+ a0, a1, args[3] - 1, a2);
85
+ }
86
+ break;
87
+
88
+ case INDEX_op_sextract_i64:
89
+ if (a2 == 0 && args[3] == 32) {
90
+ tcg_out_ext32s(s, a0, a1);
91
+ break;
92
+ }
93
+ /* FALLTHRU */
94
+ case INDEX_op_sextract_i32:
95
+ if (a2 == 0 && args[3] == 8) {
96
+ tcg_out_ext8s(s, TCG_TYPE_REG, a0, a1);
97
+ } else if (a2 == 0 && args[3] == 16) {
98
+ tcg_out_ext16s(s, TCG_TYPE_REG, a0, a1);
99
+ } else {
100
+ g_assert_not_reached();
101
+ }
102
break;
103
104
case INDEX_op_brcond_i32:
105
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
106
case INDEX_op_ext8s_i32:
107
case INDEX_op_ext16s_i32:
108
case INDEX_op_extract_i32:
109
+ case INDEX_op_sextract_i32:
110
case INDEX_op_ld8u_i64:
111
case INDEX_op_ld8s_i64:
112
case INDEX_op_ld16u_i64:
113
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
114
case INDEX_op_extrl_i64_i32:
115
case INDEX_op_extrh_i64_i32:
116
case INDEX_op_extract_i64:
117
+ case INDEX_op_sextract_i64:
118
return C_O1_I1(r, r);
119
120
case INDEX_op_st8_i32:
121
--
122
2.43.0
123
124
diff view generated by jsdifflib
New patch
1
Accept byte and word extensions with the extract opcodes.
2
This is preparatory to removing the specialized extracts.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/ppc/tcg-target-has.h | 16 ++++++++++++++--
8
tcg/ppc/tcg-target.c.inc | 30 ++++++++++++++++++++++++++++++
9
2 files changed, 44 insertions(+), 2 deletions(-)
10
11
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/ppc/tcg-target-has.h
14
+++ b/tcg/ppc/tcg-target-has.h
15
@@ -XXX,XX +XXX,XX @@
16
#define TCG_TARGET_HAS_ctpop_i32 have_isa_2_06
17
#define TCG_TARGET_HAS_deposit_i32 1
18
#define TCG_TARGET_HAS_extract_i32 1
19
-#define TCG_TARGET_HAS_sextract_i32 0
20
+#define TCG_TARGET_HAS_sextract_i32 1
21
#define TCG_TARGET_HAS_extract2_i32 0
22
#define TCG_TARGET_HAS_negsetcond_i32 1
23
#define TCG_TARGET_HAS_mulu2_i32 0
24
@@ -XXX,XX +XXX,XX @@
25
#define TCG_TARGET_HAS_ctpop_i64 have_isa_2_06
26
#define TCG_TARGET_HAS_deposit_i64 1
27
#define TCG_TARGET_HAS_extract_i64 1
28
-#define TCG_TARGET_HAS_sextract_i64 0
29
+#define TCG_TARGET_HAS_sextract_i64 1
30
#define TCG_TARGET_HAS_extract2_i64 0
31
#define TCG_TARGET_HAS_negsetcond_i64 1
32
#define TCG_TARGET_HAS_add2_i64 1
33
@@ -XXX,XX +XXX,XX @@
34
#define TCG_TARGET_HAS_cmpsel_vec 1
35
#define TCG_TARGET_HAS_tst_vec 0
36
37
+#define TCG_TARGET_extract_valid(type, ofs, len) 1
38
+
39
+static inline bool
40
+tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
41
+{
42
+ if (type == TCG_TYPE_I64 && ofs + len == 32) {
43
+ return true;
44
+ }
45
+ return ofs == 0 && (len == 8 || len == 16);
46
+}
47
+#define TCG_TARGET_sextract_valid tcg_target_sextract_valid
48
+
49
#endif
50
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
51
index XXXXXXX..XXXXXXX 100644
52
--- a/tcg/ppc/tcg-target.c.inc
53
+++ b/tcg/ppc/tcg-target.c.inc
54
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
55
break;
56
57
case INDEX_op_extract_i32:
58
+ if (args[2] == 0 && args[3] <= 16) {
59
+ tcg_out32(s, ANDI | SAI(args[1], args[0], (1 << args[3]) - 1));
60
+ break;
61
+ }
62
tcg_out_rlw(s, RLWINM, args[0], args[1],
63
32 - args[2], 32 - args[3], 31);
64
break;
65
case INDEX_op_extract_i64:
66
+ if (args[2] == 0 && args[3] <= 16) {
67
+ tcg_out32(s, ANDI | SAI(args[1], args[0], (1 << args[3]) - 1));
68
+ break;
69
+ }
70
tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
71
break;
72
73
+ case INDEX_op_sextract_i64:
74
+ if (args[2] + args[3] == 32) {
75
+ if (args[2] == 0) {
76
+ tcg_out_ext32s(s, args[0], args[1]);
77
+ } else {
78
+ tcg_out_sari32(s, args[0], args[1], args[2]);
79
+ }
80
+ break;
81
+ }
82
+ /* FALLTHRU */
83
+ case INDEX_op_sextract_i32:
84
+ if (args[2] == 0 && args[3] == 8) {
85
+ tcg_out_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
86
+ } else if (args[2] == 0 && args[3] == 16) {
87
+ tcg_out_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
88
+ } else {
89
+ g_assert_not_reached();
90
+ }
91
+ break;
92
+
93
case INDEX_op_movcond_i32:
94
tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
95
args[3], args[4], const_args[2]);
96
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
97
case INDEX_op_bswap16_i32:
98
case INDEX_op_bswap32_i32:
99
case INDEX_op_extract_i32:
100
+ case INDEX_op_sextract_i32:
101
case INDEX_op_ld8u_i64:
102
case INDEX_op_ld8s_i64:
103
case INDEX_op_ld16u_i64:
104
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
105
case INDEX_op_bswap32_i64:
106
case INDEX_op_bswap64_i64:
107
case INDEX_op_extract_i64:
108
+ case INDEX_op_sextract_i64:
109
return C_O1_I1(r, r);
110
111
case INDEX_op_st8_i32:
112
--
113
2.43.0
114
115
diff view generated by jsdifflib
New patch
1
Accept byte and word extensions with the extract opcodes.
2
This is preparatory to removing the specialized extracts.
1
3
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
---
7
tcg/riscv/tcg-target-has.h | 39 ++++++++++++++++++++++++++++++++++----
8
tcg/riscv/tcg-target.c.inc | 34 +++++++++++++++++++++++++++++++++
9
2 files changed, 69 insertions(+), 4 deletions(-)
10
11
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
12
index XXXXXXX..XXXXXXX 100644
13
--- a/tcg/riscv/tcg-target-has.h
14
+++ b/tcg/riscv/tcg-target-has.h
15
@@ -XXX,XX +XXX,XX @@
16
#define TCG_TARGET_HAS_div2_i32 0
17
#define TCG_TARGET_HAS_rot_i32 (cpuinfo & CPUINFO_ZBB)
18
#define TCG_TARGET_HAS_deposit_i32 0
19
-#define TCG_TARGET_HAS_extract_i32 0
20
-#define TCG_TARGET_HAS_sextract_i32 0
21
+#define TCG_TARGET_HAS_extract_i32 1
22
+#define TCG_TARGET_HAS_sextract_i32 1
23
#define TCG_TARGET_HAS_extract2_i32 0
24
#define TCG_TARGET_HAS_add2_i32 1
25
#define TCG_TARGET_HAS_sub2_i32 1
26
@@ -XXX,XX +XXX,XX @@
27
#define TCG_TARGET_HAS_div2_i64 0
28
#define TCG_TARGET_HAS_rot_i64 (cpuinfo & CPUINFO_ZBB)
29
#define TCG_TARGET_HAS_deposit_i64 0
30
-#define TCG_TARGET_HAS_extract_i64 0
31
-#define TCG_TARGET_HAS_sextract_i64 0
32
+#define TCG_TARGET_HAS_extract_i64 1
33
+#define TCG_TARGET_HAS_sextract_i64 1
34
#define TCG_TARGET_HAS_extract2_i64 0
35
#define TCG_TARGET_HAS_extr_i64_i32 1
36
#define TCG_TARGET_HAS_ext8s_i64 1
37
@@ -XXX,XX +XXX,XX @@
38
39
#define TCG_TARGET_HAS_tst_vec 0
40
41
+static inline bool
42
+tcg_target_extract_valid(TCGType type, unsigned ofs, unsigned len)
43
+{
44
+ if (ofs == 0) {
45
+ switch (len) {
46
+ case 16:
47
+ return cpuinfo & CPUINFO_ZBB;
48
+ case 32:
49
+ return (cpuinfo & CPUINFO_ZBA) && type == TCG_TYPE_I64;
50
+ }
51
+ }
52
+ return false;
53
+}
54
+#define TCG_TARGET_extract_valid tcg_target_extract_valid
55
+
56
+static inline bool
57
+tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
58
+{
59
+ if (ofs == 0) {
60
+ switch (len) {
61
+ case 8:
62
+ case 16:
63
+ return cpuinfo & CPUINFO_ZBB;
64
+ case 32:
65
+ return type == TCG_TYPE_I64;
66
+ }
67
+ }
68
+ return false;
69
+}
70
+#define TCG_TARGET_sextract_valid tcg_target_sextract_valid
71
+
72
#endif
73
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
74
index XXXXXXX..XXXXXXX 100644
75
--- a/tcg/riscv/tcg-target.c.inc
76
+++ b/tcg/riscv/tcg-target.c.inc
77
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
78
tcg_out_mb(s, a0);
79
break;
80
81
+ case INDEX_op_extract_i64:
82
+ if (a2 == 0 && args[3] == 32) {
83
+ tcg_out_ext32u(s, a0, a1);
84
+ break;
85
+ }
86
+ /* FALLTHRU */
87
+ case INDEX_op_extract_i32:
88
+ if (a2 == 0 && args[3] == 16) {
89
+ tcg_out_ext16u(s, a0, a1);
90
+ } else {
91
+ g_assert_not_reached();
92
+ }
93
+ break;
94
+
95
+ case INDEX_op_sextract_i64:
96
+ if (a2 == 0 && args[3] == 32) {
97
+ tcg_out_ext32s(s, a0, a1);
98
+ break;
99
+ }
100
+ /* FALLTHRU */
101
+ case INDEX_op_sextract_i32:
102
+ if (a2 == 0 && args[3] == 8) {
103
+ tcg_out_ext8s(s, TCG_TYPE_REG, a0, a1);
104
+ } else if (a2 == 0 && args[3] == 16) {
105
+ tcg_out_ext16s(s, TCG_TYPE_REG, a0, a1);
106
+ } else {
107
+ g_assert_not_reached();
108
+ }
109
+ break;
110
+
111
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
112
case INDEX_op_mov_i64:
113
case INDEX_op_call: /* Always emitted via tcg_out_call. */
114
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
115
case INDEX_op_extrl_i64_i32:
116
case INDEX_op_extrh_i64_i32:
117
case INDEX_op_ext_i32_i64:
118
+ case INDEX_op_extract_i32:
119
+ case INDEX_op_extract_i64:
120
+ case INDEX_op_sextract_i32:
121
+ case INDEX_op_sextract_i64:
122
case INDEX_op_bswap16_i32:
123
case INDEX_op_bswap32_i32:
124
case INDEX_op_bswap16_i64:
125
--
126
2.43.0
127
128
diff view generated by jsdifflib
New patch
1
Extracts which abut bit 32 may use 32-bit shifts.
1
2
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
---
6
tcg/riscv/tcg-target-has.h | 24 +++++++-----------------
7
tcg/riscv/tcg-target.c.inc | 16 ++++++++++++----
8
2 files changed, 19 insertions(+), 21 deletions(-)
9
10
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/tcg/riscv/tcg-target-has.h
13
+++ b/tcg/riscv/tcg-target-has.h
14
@@ -XXX,XX +XXX,XX @@
15
static inline bool
16
tcg_target_extract_valid(TCGType type, unsigned ofs, unsigned len)
17
{
18
- if (ofs == 0) {
19
- switch (len) {
20
- case 16:
21
- return cpuinfo & CPUINFO_ZBB;
22
- case 32:
23
- return (cpuinfo & CPUINFO_ZBA) && type == TCG_TYPE_I64;
24
- }
25
+ if (type == TCG_TYPE_I64 && ofs + len == 32) {
26
+ /* ofs > 0 uses SRLIW; ofs == 0 uses add.uw. */
27
+ return ofs || (cpuinfo & CPUINFO_ZBA);
28
}
29
- return false;
30
+ return (cpuinfo & CPUINFO_ZBB) && ofs == 0 && len == 16;
31
}
32
#define TCG_TARGET_extract_valid tcg_target_extract_valid
33
34
static inline bool
35
tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
36
{
37
- if (ofs == 0) {
38
- switch (len) {
39
- case 8:
40
- case 16:
41
- return cpuinfo & CPUINFO_ZBB;
42
- case 32:
43
- return type == TCG_TYPE_I64;
44
- }
45
+ if (type == TCG_TYPE_I64 && ofs + len == 32) {
46
+ return true;
47
}
48
- return false;
49
+ return (cpuinfo & CPUINFO_ZBB) && ofs == 0 && (len == 8 || len == 16);
50
}
51
#define TCG_TARGET_sextract_valid tcg_target_sextract_valid
52
53
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
54
index XXXXXXX..XXXXXXX 100644
55
--- a/tcg/riscv/tcg-target.c.inc
56
+++ b/tcg/riscv/tcg-target.c.inc
57
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
58
break;
59
60
case INDEX_op_extract_i64:
61
- if (a2 == 0 && args[3] == 32) {
62
- tcg_out_ext32u(s, a0, a1);
63
+ if (a2 + args[3] == 32) {
64
+ if (a2 == 0) {
65
+ tcg_out_ext32u(s, a0, a1);
66
+ } else {
67
+ tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2);
68
+ }
69
break;
70
}
71
/* FALLTHRU */
72
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
73
break;
74
75
case INDEX_op_sextract_i64:
76
- if (a2 == 0 && args[3] == 32) {
77
- tcg_out_ext32s(s, a0, a1);
78
+ if (a2 + args[3] == 32) {
79
+ if (a2 == 0) {
80
+ tcg_out_ext32s(s, a0, a1);
81
+ } else {
82
+ tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2);
83
+ }
84
break;
85
}
86
/* FALLTHRU */
87
--
88
2.43.0
89
90
diff view generated by jsdifflib
1
Now that we have collected all of the page data into
1
Accept byte and word extensions with the extract opcodes.
2
CPUTLBEntryFull, provide an interface to record that
2
This is preparatory to removing the specialized extracts.
3
all in one go, instead of using 4 arguments. This interface
4
allows CPUTLBEntryFull to be extended without having to
5
change the number of arguments.
6
3
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
9
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
---
6
---
12
include/exec/cpu-defs.h | 14 +++++++++++
7
tcg/s390x/tcg-target-has.h | 22 ++++++++++++++++++++--
13
include/exec/exec-all.h | 22 ++++++++++++++++++
8
tcg/s390x/tcg-target.c.inc | 37 +++++++++++++++++++++++++++++++++++++
14
accel/tcg/cputlb.c | 51 ++++++++++++++++++++++++++---------------
9
2 files changed, 57 insertions(+), 2 deletions(-)
15
3 files changed, 69 insertions(+), 18 deletions(-)
16
10
17
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
11
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
18
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
19
--- a/include/exec/cpu-defs.h
13
--- a/tcg/s390x/tcg-target-has.h
20
+++ b/include/exec/cpu-defs.h
14
+++ b/tcg/s390x/tcg-target-has.h
21
@@ -XXX,XX +XXX,XX @@ typedef struct CPUTLBEntryFull {
15
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
22
* + the offset within the target MemoryRegion (otherwise)
16
#define TCG_TARGET_HAS_ctpop_i32 1
23
*/
17
#define TCG_TARGET_HAS_deposit_i32 1
24
hwaddr xlat_section;
18
#define TCG_TARGET_HAS_extract_i32 1
19
-#define TCG_TARGET_HAS_sextract_i32 0
20
+#define TCG_TARGET_HAS_sextract_i32 1
21
#define TCG_TARGET_HAS_extract2_i32 0
22
#define TCG_TARGET_HAS_negsetcond_i32 1
23
#define TCG_TARGET_HAS_add2_i32 1
24
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
25
#define TCG_TARGET_HAS_ctpop_i64 1
26
#define TCG_TARGET_HAS_deposit_i64 1
27
#define TCG_TARGET_HAS_extract_i64 1
28
-#define TCG_TARGET_HAS_sextract_i64 0
29
+#define TCG_TARGET_HAS_sextract_i64 1
30
#define TCG_TARGET_HAS_extract2_i64 0
31
#define TCG_TARGET_HAS_negsetcond_i64 1
32
#define TCG_TARGET_HAS_add2_i64 1
33
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
34
#define TCG_TARGET_HAS_cmpsel_vec 1
35
#define TCG_TARGET_HAS_tst_vec 0
36
37
+#define TCG_TARGET_extract_valid(type, ofs, len) 1
25
+
38
+
26
+ /*
39
+static inline bool
27
+ * @phys_addr contains the physical address in the address space
40
+tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
28
+ * given by cpu_asidx_from_attrs(cpu, @attrs).
41
+{
29
+ */
42
+ if (ofs == 0) {
30
+ hwaddr phys_addr;
43
+ switch (len) {
44
+ case 8:
45
+ case 16:
46
+ return true;
47
+ case 32:
48
+ return type == TCG_TYPE_I64;
49
+ }
50
+ }
51
+ return false;
52
+}
53
+#define TCG_TARGET_sextract_valid tcg_target_sextract_valid
31
+
54
+
32
+ /* @attrs contains the memory transaction attributes for the page. */
55
#endif
33
MemTxAttrs attrs;
56
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
34
+
35
+ /* @prot contains the complete protections for the page. */
36
+ uint8_t prot;
37
+
38
+ /* @lg_page_size contains the log2 of the page size. */
39
+ uint8_t lg_page_size;
40
} CPUTLBEntryFull;
41
42
/*
43
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
44
index XXXXXXX..XXXXXXX 100644
57
index XXXXXXX..XXXXXXX 100644
45
--- a/include/exec/exec-all.h
58
--- a/tcg/s390x/tcg-target.c.inc
46
+++ b/include/exec/exec-all.h
59
+++ b/tcg/s390x/tcg-target.c.inc
47
@@ -XXX,XX +XXX,XX @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
60
@@ -XXX,XX +XXX,XX @@ static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
48
uint16_t idxmap,
61
static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
49
unsigned bits);
62
int ofs, int len)
50
63
{
51
+/**
64
+ if (ofs == 0) {
52
+ * tlb_set_page_full:
65
+ switch (len) {
53
+ * @cpu: CPU context
66
+ case 8:
54
+ * @mmu_idx: mmu index of the tlb to modify
67
+ tcg_out_ext8u(s, dest, src);
55
+ * @vaddr: virtual address of the entry to add
68
+ return;
56
+ * @full: the details of the tlb entry
69
+ case 16:
57
+ *
70
+ tcg_out_ext16u(s, dest, src);
58
+ * Add an entry to @cpu tlb index @mmu_idx. All of the fields of
71
+ return;
59
+ * @full must be filled, except for xlat_section, and constitute
72
+ case 32:
60
+ * the complete description of the translated page.
73
+ tcg_out_ext32u(s, dest, src);
61
+ *
74
+ return;
62
+ * This is generally called by the target tlb_fill function after
75
+ }
63
+ * having performed a successful page table walk to find the physical
76
+ }
64
+ * address and attributes for the translation.
77
tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
65
+ *
66
+ * At most one entry for a given virtual address is permitted. Only a
67
+ * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
68
+ * used by tlb_flush_page.
69
+ */
70
+void tlb_set_page_full(CPUState *cpu, int mmu_idx, target_ulong vaddr,
71
+ CPUTLBEntryFull *full);
72
+
73
/**
74
* tlb_set_page_with_attrs:
75
* @cpu: CPU to add this TLB entry for
76
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
77
index XXXXXXX..XXXXXXX 100644
78
--- a/accel/tcg/cputlb.c
79
+++ b/accel/tcg/cputlb.c
80
@@ -XXX,XX +XXX,XX @@ static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
81
env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
82
}
78
}
83
79
84
-/* Add a new TLB entry. At most one entry for a given virtual address
80
+static void tgen_sextract(TCGContext *s, TCGReg dest, TCGReg src,
85
+/*
81
+ int ofs, int len)
86
+ * Add a new TLB entry. At most one entry for a given virtual address
87
* is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
88
* supplied size is only used by tlb_flush_page.
89
*
90
* Called from TCG-generated code, which is under an RCU read-side
91
* critical section.
92
*/
93
-void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
94
- hwaddr paddr, MemTxAttrs attrs, int prot,
95
- int mmu_idx, target_ulong size)
96
+void tlb_set_page_full(CPUState *cpu, int mmu_idx,
97
+ target_ulong vaddr, CPUTLBEntryFull *full)
98
{
99
CPUArchState *env = cpu->env_ptr;
100
CPUTLB *tlb = env_tlb(env);
101
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
102
CPUTLBEntry *te, tn;
103
hwaddr iotlb, xlat, sz, paddr_page;
104
target_ulong vaddr_page;
105
- int asidx = cpu_asidx_from_attrs(cpu, attrs);
106
- int wp_flags;
107
+ int asidx, wp_flags, prot;
108
bool is_ram, is_romd;
109
110
assert_cpu_is_self(cpu);
111
112
- if (size <= TARGET_PAGE_SIZE) {
113
+ if (full->lg_page_size <= TARGET_PAGE_BITS) {
114
sz = TARGET_PAGE_SIZE;
115
} else {
116
- tlb_add_large_page(env, mmu_idx, vaddr, size);
117
- sz = size;
118
+ sz = (hwaddr)1 << full->lg_page_size;
119
+ tlb_add_large_page(env, mmu_idx, vaddr, sz);
120
}
121
vaddr_page = vaddr & TARGET_PAGE_MASK;
122
- paddr_page = paddr & TARGET_PAGE_MASK;
123
+ paddr_page = full->phys_addr & TARGET_PAGE_MASK;
124
125
+ prot = full->prot;
126
+ asidx = cpu_asidx_from_attrs(cpu, full->attrs);
127
section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
128
- &xlat, &sz, attrs, &prot);
129
+ &xlat, &sz, full->attrs, &prot);
130
assert(sz >= TARGET_PAGE_SIZE);
131
132
tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
133
" prot=%x idx=%d\n",
134
- vaddr, paddr, prot, mmu_idx);
135
+ vaddr, full->phys_addr, prot, mmu_idx);
136
137
address = vaddr_page;
138
- if (size < TARGET_PAGE_SIZE) {
139
+ if (full->lg_page_size < TARGET_PAGE_BITS) {
140
/* Repeat the MMU check and TLB fill on every access. */
141
address |= TLB_INVALID_MASK;
142
}
143
- if (attrs.byte_swap) {
144
+ if (full->attrs.byte_swap) {
145
address |= TLB_BSWAP;
146
}
147
148
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
149
* subtract here is that of the page base, and not the same as the
150
* vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
151
*/
152
+ desc->fulltlb[index] = *full;
153
desc->fulltlb[index].xlat_section = iotlb - vaddr_page;
154
- desc->fulltlb[index].attrs = attrs;
155
+ desc->fulltlb[index].phys_addr = paddr_page;
156
+ desc->fulltlb[index].prot = prot;
157
158
/* Now calculate the new entry */
159
tn.addend = addend - vaddr_page;
160
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
161
qemu_spin_unlock(&tlb->c.lock);
162
}
163
164
-/* Add a new TLB entry, but without specifying the memory
165
- * transaction attributes to be used.
166
- */
167
+void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
168
+ hwaddr paddr, MemTxAttrs attrs, int prot,
169
+ int mmu_idx, target_ulong size)
170
+{
82
+{
171
+ CPUTLBEntryFull full = {
83
+ if (ofs == 0) {
172
+ .phys_addr = paddr,
84
+ switch (len) {
173
+ .attrs = attrs,
85
+ case 8:
174
+ .prot = prot,
86
+ tcg_out_ext8s(s, TCG_TYPE_REG, dest, src);
175
+ .lg_page_size = ctz64(size)
87
+ return;
176
+ };
88
+ case 16:
177
+
89
+ tcg_out_ext16s(s, TCG_TYPE_REG, dest, src);
178
+ assert(is_power_of_2(size));
90
+ return;
179
+ tlb_set_page_full(cpu, mmu_idx, vaddr, &full);
91
+ case 32:
92
+ tcg_out_ext32s(s, dest, src);
93
+ return;
94
+ }
95
+ }
96
+ g_assert_not_reached();
180
+}
97
+}
181
+
98
+
182
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
99
static void tgen_gotoi(TCGContext *s, int cc, const tcg_insn_unit *dest)
183
hwaddr paddr, int prot,
100
{
184
int mmu_idx, target_ulong size)
101
ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
102
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
103
OP_32_64(extract):
104
tgen_extract(s, args[0], args[1], args[2], args[3]);
105
break;
106
+ OP_32_64(sextract):
107
+ tgen_sextract(s, args[0], args[1], args[2], args[3]);
108
+ break;
109
110
case INDEX_op_clz_i64:
111
tgen_clz(s, args[0], args[1], args[2], const_args[2]);
112
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
113
case INDEX_op_extu_i32_i64:
114
case INDEX_op_extract_i32:
115
case INDEX_op_extract_i64:
116
+ case INDEX_op_sextract_i32:
117
+ case INDEX_op_sextract_i64:
118
case INDEX_op_ctpop_i32:
119
case INDEX_op_ctpop_i64:
120
return C_O1_I1(r, r);
185
--
121
--
186
2.34.1
122
2.43.0
187
123
188
124
diff view generated by jsdifflib
1
Use the pc coming from db->pc_first rather than the TB.
1
Extracts which abut bit 32 may use 32-bit shifts.
2
2
3
Use the cached host_addr rather than re-computing for the
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
first page. We still need a separate lookup for the second
5
page because it won't be computed for DisasContextBase until
6
the translator actually performs a read from the page.
7
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
5
---
11
include/exec/plugin-gen.h | 7 ++++---
6
tcg/sparc64/tcg-target-has.h | 13 +++++++++----
12
accel/tcg/plugin-gen.c | 22 +++++++++++-----------
7
tcg/sparc64/tcg-target.c.inc | 11 +++++++++++
13
accel/tcg/translator.c | 2 +-
8
2 files changed, 20 insertions(+), 4 deletions(-)
14
3 files changed, 16 insertions(+), 15 deletions(-)
15
9
16
diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h
10
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
17
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
18
--- a/include/exec/plugin-gen.h
12
--- a/tcg/sparc64/tcg-target-has.h
19
+++ b/include/exec/plugin-gen.h
13
+++ b/tcg/sparc64/tcg-target-has.h
20
@@ -XXX,XX +XXX,XX @@ struct DisasContextBase;
14
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
21
15
#define TCG_TARGET_HAS_ctz_i32 0
22
#ifdef CONFIG_PLUGIN
16
#define TCG_TARGET_HAS_ctpop_i32 0
23
17
#define TCG_TARGET_HAS_deposit_i32 0
24
-bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool supress);
18
-#define TCG_TARGET_HAS_extract_i32 0
25
+bool plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db,
19
-#define TCG_TARGET_HAS_sextract_i32 0
26
+ bool supress);
20
+#define TCG_TARGET_HAS_extract_i32 1
27
void plugin_gen_tb_end(CPUState *cpu);
21
+#define TCG_TARGET_HAS_sextract_i32 1
28
void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db);
22
#define TCG_TARGET_HAS_extract2_i32 0
29
void plugin_gen_insn_end(void);
23
#define TCG_TARGET_HAS_negsetcond_i32 1
30
@@ -XXX,XX +XXX,XX @@ static inline void plugin_insn_append(abi_ptr pc, const void *from, size_t size)
24
#define TCG_TARGET_HAS_add2_i32 1
31
25
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
32
#else /* !CONFIG_PLUGIN */
26
#define TCG_TARGET_HAS_ctz_i64 0
33
27
#define TCG_TARGET_HAS_ctpop_i64 0
34
-static inline
28
#define TCG_TARGET_HAS_deposit_i64 0
35
-bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool supress)
29
-#define TCG_TARGET_HAS_extract_i64 0
36
+static inline bool
30
-#define TCG_TARGET_HAS_sextract_i64 0
37
+plugin_gen_tb_start(CPUState *cpu, const struct DisasContextBase *db, bool sup)
31
+#define TCG_TARGET_HAS_extract_i64 1
38
{
32
+#define TCG_TARGET_HAS_sextract_i64 1
39
return false;
33
#define TCG_TARGET_HAS_extract2_i64 0
40
}
34
#define TCG_TARGET_HAS_negsetcond_i64 1
41
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
35
#define TCG_TARGET_HAS_add2_i64 1
36
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
37
38
#define TCG_TARGET_HAS_tst 1
39
40
+#define TCG_TARGET_extract_valid(type, ofs, len) \
41
+ ((type) == TCG_TYPE_I64 && (ofs) + (len) == 32)
42
+
43
+#define TCG_TARGET_sextract_valid TCG_TARGET_extract_valid
44
+
45
#endif
46
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
42
index XXXXXXX..XXXXXXX 100644
47
index XXXXXXX..XXXXXXX 100644
43
--- a/accel/tcg/plugin-gen.c
48
--- a/tcg/sparc64/tcg-target.c.inc
44
+++ b/accel/tcg/plugin-gen.c
49
+++ b/tcg/sparc64/tcg-target.c.inc
45
@@ -XXX,XX +XXX,XX @@ static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb)
50
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
46
pr_ops();
51
tcg_out_mb(s, a0);
47
}
52
break;
48
53
49
-bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_only)
54
+ case INDEX_op_extract_i64:
50
+bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db,
55
+ tcg_debug_assert(a2 + args[3] == 32);
51
+ bool mem_only)
56
+ tcg_out_arithi(s, a0, a1, a2, SHIFT_SRL);
52
{
57
+ break;
53
bool ret = false;
58
+ case INDEX_op_sextract_i64:
54
59
+ tcg_debug_assert(a2 + args[3] == 32);
55
@@ -XXX,XX +XXX,XX @@ bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_onl
60
+ tcg_out_arithi(s, a0, a1, a2, SHIFT_SRA);
56
61
+ break;
57
ret = true;
62
+
58
63
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
59
- ptb->vaddr = tb->pc;
64
case INDEX_op_mov_i64:
60
+ ptb->vaddr = db->pc_first;
65
case INDEX_op_call: /* Always emitted via tcg_out_call. */
61
ptb->vaddr2 = -1;
66
@@ -XXX,XX +XXX,XX @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
62
- get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1);
67
case INDEX_op_ext32u_i64:
63
+ ptb->haddr1 = db->host_addr[0];
68
case INDEX_op_ext_i32_i64:
64
ptb->haddr2 = NULL;
69
case INDEX_op_extu_i32_i64:
65
ptb->mem_only = mem_only;
70
+ case INDEX_op_extract_i64:
66
71
+ case INDEX_op_sextract_i64:
67
@@ -XXX,XX +XXX,XX @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
72
case INDEX_op_qemu_ld_a32_i32:
68
* Note that we skip this when haddr1 == NULL, e.g. when we're
73
case INDEX_op_qemu_ld_a64_i32:
69
* fetching instructions from a region not backed by RAM.
74
case INDEX_op_qemu_ld_a32_i64:
70
*/
71
- if (likely(ptb->haddr1 != NULL && ptb->vaddr2 == -1) &&
72
- unlikely((db->pc_next & TARGET_PAGE_MASK) !=
73
- (db->pc_first & TARGET_PAGE_MASK))) {
74
- get_page_addr_code_hostp(cpu->env_ptr, db->pc_next,
75
- &ptb->haddr2);
76
- ptb->vaddr2 = db->pc_next;
77
- }
78
- if (likely(ptb->vaddr2 == -1)) {
79
+ if (ptb->haddr1 == NULL) {
80
+ pinsn->haddr = NULL;
81
+ } else if (is_same_page(db, db->pc_next)) {
82
pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr;
83
} else {
84
+ if (ptb->vaddr2 == -1) {
85
+ ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
86
+ get_page_addr_code_hostp(cpu->env_ptr, ptb->vaddr2, &ptb->haddr2);
87
+ }
88
pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
89
}
90
}
91
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
92
index XXXXXXX..XXXXXXX 100644
93
--- a/accel/tcg/translator.c
94
+++ b/accel/tcg/translator.c
95
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
96
ops->tb_start(db, cpu);
97
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
98
99
- plugin_enabled = plugin_gen_tb_start(cpu, tb, cflags & CF_MEMI_ONLY);
100
+ plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY);
101
102
while (true) {
103
db->num_insns++;
104
--
75
--
105
2.34.1
76
2.43.0
106
77
107
78
diff view generated by jsdifflib
1
When PAGE_WRITE_INV is set when calling tlb_set_page,
1
Trivially mirrors TCG_TARGET_HAS_{s}extract_*.
2
we immediately set TLB_INVALID_MASK in order to force
3
tlb_fill to be called on the next lookup. Here in
4
probe_access_internal, we have just called tlb_fill
5
and eliminated true misses, thus the lookup must be valid.
6
2
7
This allows us to remove a warning comment from s390x.
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
8
There doesn't seem to be a reason to change the code though.
9
10
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
11
Reviewed-by: David Hildenbrand <david@redhat.com>
12
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
5
---
15
accel/tcg/cputlb.c | 10 +++++++++-
6
tcg/tci/tcg-target-has.h | 3 +++
16
target/s390x/tcg/mem_helper.c | 4 ----
7
1 file changed, 3 insertions(+)
17
2 files changed, 9 insertions(+), 5 deletions(-)
18
8
19
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
9
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
20
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
21
--- a/accel/tcg/cputlb.c
11
--- a/tcg/tci/tcg-target-has.h
22
+++ b/accel/tcg/cputlb.c
12
+++ b/tcg/tci/tcg-target-has.h
23
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
13
@@ -XXX,XX +XXX,XX @@
24
}
14
25
tlb_addr = tlb_read_ofs(entry, elt_ofs);
15
#define TCG_TARGET_HAS_tst 1
26
16
27
+ flags = TLB_FLAGS_MASK;
17
+#define TCG_TARGET_extract_valid(type, ofs, len) 1
28
page_addr = addr & TARGET_PAGE_MASK;
18
+#define TCG_TARGET_sextract_valid(type, ofs, len) 1
29
if (!tlb_hit_page(tlb_addr, page_addr)) {
30
if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
31
@@ -XXX,XX +XXX,XX @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
32
33
/* TLB resize via tlb_fill may have moved the entry. */
34
entry = tlb_entry(env, mmu_idx, addr);
35
+
19
+
36
+ /*
20
#endif
37
+ * With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
38
+ * to force the next access through tlb_fill. We've just
39
+ * called tlb_fill, so we know that this entry *is* valid.
40
+ */
41
+ flags &= ~TLB_INVALID_MASK;
42
}
43
tlb_addr = tlb_read_ofs(entry, elt_ofs);
44
}
45
- flags = tlb_addr & TLB_FLAGS_MASK;
46
+ flags &= tlb_addr;
47
48
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
49
if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
50
diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c
51
index XXXXXXX..XXXXXXX 100644
52
--- a/target/s390x/tcg/mem_helper.c
53
+++ b/target/s390x/tcg/mem_helper.c
54
@@ -XXX,XX +XXX,XX @@ static int s390_probe_access(CPUArchState *env, target_ulong addr, int size,
55
#else
56
int flags;
57
58
- /*
59
- * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL
60
- * to detect if there was an exception during tlb_fill().
61
- */
62
env->tlb_fill_exc = 0;
63
flags = probe_access_flags(env, addr, access_type, mmu_idx, nonfault, phost,
64
ra);
65
--
21
--
66
2.34.1
22
2.43.0
67
23
68
24
diff view generated by jsdifflib
1
This field is only written, not read; remove it.
1
We already have these assertions during opcode creation.
2
2
3
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
include/hw/core/cpu.h | 1 -
6
tcg/tci/tcg-target.c.inc | 20 ++------------------
9
accel/tcg/cputlb.c | 7 +++----
7
1 file changed, 2 insertions(+), 18 deletions(-)
10
2 files changed, 3 insertions(+), 5 deletions(-)
11
8
12
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
9
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
13
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
14
--- a/include/hw/core/cpu.h
11
--- a/tcg/tci/tcg-target.c.inc
15
+++ b/include/hw/core/cpu.h
12
+++ b/tcg/tci/tcg-target.c.inc
16
@@ -XXX,XX +XXX,XX @@ struct CPUWatchpoint {
13
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
17
* the memory regions get moved around by io_writex.
14
break;
18
*/
15
19
typedef struct SavedIOTLB {
16
CASE_32_64(deposit) /* Optional (TCG_TARGET_HAS_deposit_*). */
20
- hwaddr addr;
17
- {
21
MemoryRegionSection *section;
18
- TCGArg pos = args[3], len = args[4];
22
hwaddr mr_offset;
19
- TCGArg max = opc == INDEX_op_deposit_i32 ? 32 : 64;
23
} SavedIOTLB;
20
-
24
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
21
- tcg_debug_assert(pos < max);
25
index XXXXXXX..XXXXXXX 100644
22
- tcg_debug_assert(pos + len <= max);
26
--- a/accel/tcg/cputlb.c
23
-
27
+++ b/accel/tcg/cputlb.c
24
- tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], pos, len);
28
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
25
- }
29
* This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
26
+ tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
30
* because of the side effect of io_writex changing memory layout.
27
break;
31
*/
28
32
-static void save_iotlb_data(CPUState *cs, hwaddr addr,
29
CASE_32_64(extract) /* Optional (TCG_TARGET_HAS_extract_*). */
33
- MemoryRegionSection *section, hwaddr mr_offset)
30
CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
34
+static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
31
- {
35
+ hwaddr mr_offset)
32
- TCGArg pos = args[2], len = args[3];
36
{
33
- TCGArg max = type == TCG_TYPE_I32 ? 32 : 64;
37
#ifdef CONFIG_PLUGIN
34
-
38
SavedIOTLB *saved = &cs->saved_iotlb;
35
- tcg_debug_assert(pos < max);
39
- saved->addr = addr;
36
- tcg_debug_assert(pos + len <= max);
40
saved->section = section;
37
-
41
saved->mr_offset = mr_offset;
38
- tcg_out_op_rrbb(s, opc, args[0], args[1], pos, len);
42
#endif
39
- }
43
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
40
+ tcg_out_op_rrbb(s, opc, args[0], args[1], args[2], args[3]);
44
* The memory_region_dispatch may trigger a flush/resize
41
break;
45
* so for plugins we save the iotlb_data just in case.
42
46
*/
43
CASE_32_64(brcond)
47
- save_iotlb_data(cpu, full->xlat_section, section, mr_offset);
48
+ save_iotlb_data(cpu, section, mr_offset);
49
50
if (!qemu_mutex_iothread_locked()) {
51
qemu_mutex_lock_iothread();
52
--
44
--
53
2.34.1
45
2.43.0
54
46
55
47
diff view generated by jsdifflib
1
From: Alex Bennée <alex.bennee@linaro.org>
1
Make extract and sextract "unconditional" in the sense
2
that the opcodes are always present. Rely instead on
3
TCG_TARGET_HAS_{s}extract_valid, now always defined.
2
4
3
This is a heavily used function so lets avoid the cost of
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
CPU_GET_CLASS. On the romulus-bmc run it has a modest effect:
5
6
Before: 36.812 s ± 0.506 s
7
After: 35.912 s ± 0.168 s
8
9
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-Id: <20220811151413.3350684-4-alex.bennee@linaro.org>
12
Signed-off-by: Cédric Le Goater <clg@kaod.org>
13
Message-Id: <20220923084803.498337-4-clg@kaod.org>
14
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
15
---
7
---
16
hw/core/cpu-sysemu.c | 5 ++---
8
tcg/aarch64/tcg-target-has.h | 4 ----
17
1 file changed, 2 insertions(+), 3 deletions(-)
9
tcg/arm/tcg-target-has.h | 2 --
10
tcg/i386/tcg-target-has.h | 4 ----
11
tcg/loongarch64/tcg-target-has.h | 4 ----
12
tcg/mips/tcg-target-has.h | 4 ----
13
tcg/ppc/tcg-target-has.h | 4 ----
14
tcg/riscv/tcg-target-has.h | 4 ----
15
tcg/s390x/tcg-target-has.h | 4 ----
16
tcg/sparc64/tcg-target-has.h | 4 ----
17
tcg/tcg-has.h | 12 ------------
18
tcg/tci/tcg-target-has.h | 4 ----
19
tcg/optimize.c | 8 ++++----
20
tcg/tcg.c | 12 ++++--------
21
tcg/tci.c | 8 --------
22
14 files changed, 8 insertions(+), 70 deletions(-)
18
23
19
diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c
24
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
20
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/core/cpu-sysemu.c
26
--- a/tcg/aarch64/tcg-target-has.h
22
+++ b/hw/core/cpu-sysemu.c
27
+++ b/tcg/aarch64/tcg-target-has.h
23
@@ -XXX,XX +XXX,XX @@ hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
28
@@ -XXX,XX +XXX,XX @@
24
29
#define TCG_TARGET_HAS_ctz_i32 1
25
int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
30
#define TCG_TARGET_HAS_ctpop_i32 0
26
{
31
#define TCG_TARGET_HAS_deposit_i32 1
27
- CPUClass *cc = CPU_GET_CLASS(cpu);
32
-#define TCG_TARGET_HAS_extract_i32 1
28
int ret = 0;
33
-#define TCG_TARGET_HAS_sextract_i32 1
29
34
#define TCG_TARGET_HAS_extract2_i32 1
30
- if (cc->sysemu_ops->asidx_from_attrs) {
35
#define TCG_TARGET_HAS_negsetcond_i32 1
31
- ret = cc->sysemu_ops->asidx_from_attrs(cpu, attrs);
36
#define TCG_TARGET_HAS_add2_i32 1
32
+ if (cpu->cc->sysemu_ops->asidx_from_attrs) {
37
@@ -XXX,XX +XXX,XX @@
33
+ ret = cpu->cc->sysemu_ops->asidx_from_attrs(cpu, attrs);
38
#define TCG_TARGET_HAS_ctz_i64 1
34
assert(ret < cpu->num_ases && ret >= 0);
39
#define TCG_TARGET_HAS_ctpop_i64 0
35
}
40
#define TCG_TARGET_HAS_deposit_i64 1
36
return ret;
41
-#define TCG_TARGET_HAS_extract_i64 1
42
-#define TCG_TARGET_HAS_sextract_i64 1
43
#define TCG_TARGET_HAS_extract2_i64 1
44
#define TCG_TARGET_HAS_negsetcond_i64 1
45
#define TCG_TARGET_HAS_add2_i64 1
46
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
47
index XXXXXXX..XXXXXXX 100644
48
--- a/tcg/arm/tcg-target-has.h
49
+++ b/tcg/arm/tcg-target-has.h
50
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
51
#define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions
52
#define TCG_TARGET_HAS_ctpop_i32 0
53
#define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions
54
-#define TCG_TARGET_HAS_extract_i32 1
55
-#define TCG_TARGET_HAS_sextract_i32 1
56
#define TCG_TARGET_HAS_extract2_i32 1
57
#define TCG_TARGET_HAS_negsetcond_i32 1
58
#define TCG_TARGET_HAS_mulu2_i32 1
59
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
60
index XXXXXXX..XXXXXXX 100644
61
--- a/tcg/i386/tcg-target-has.h
62
+++ b/tcg/i386/tcg-target-has.h
63
@@ -XXX,XX +XXX,XX @@
64
#define TCG_TARGET_HAS_ctz_i32 1
65
#define TCG_TARGET_HAS_ctpop_i32 have_popcnt
66
#define TCG_TARGET_HAS_deposit_i32 1
67
-#define TCG_TARGET_HAS_extract_i32 1
68
-#define TCG_TARGET_HAS_sextract_i32 1
69
#define TCG_TARGET_HAS_extract2_i32 1
70
#define TCG_TARGET_HAS_negsetcond_i32 1
71
#define TCG_TARGET_HAS_add2_i32 1
72
@@ -XXX,XX +XXX,XX @@
73
#define TCG_TARGET_HAS_ctz_i64 1
74
#define TCG_TARGET_HAS_ctpop_i64 have_popcnt
75
#define TCG_TARGET_HAS_deposit_i64 1
76
-#define TCG_TARGET_HAS_extract_i64 1
77
-#define TCG_TARGET_HAS_sextract_i64 1
78
#define TCG_TARGET_HAS_extract2_i64 1
79
#define TCG_TARGET_HAS_negsetcond_i64 1
80
#define TCG_TARGET_HAS_add2_i64 1
81
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
82
index XXXXXXX..XXXXXXX 100644
83
--- a/tcg/loongarch64/tcg-target-has.h
84
+++ b/tcg/loongarch64/tcg-target-has.h
85
@@ -XXX,XX +XXX,XX @@
86
#define TCG_TARGET_HAS_div2_i32 0
87
#define TCG_TARGET_HAS_rot_i32 1
88
#define TCG_TARGET_HAS_deposit_i32 1
89
-#define TCG_TARGET_HAS_extract_i32 1
90
-#define TCG_TARGET_HAS_sextract_i32 1
91
#define TCG_TARGET_HAS_extract2_i32 0
92
#define TCG_TARGET_HAS_add2_i32 0
93
#define TCG_TARGET_HAS_sub2_i32 0
94
@@ -XXX,XX +XXX,XX @@
95
#define TCG_TARGET_HAS_div2_i64 0
96
#define TCG_TARGET_HAS_rot_i64 1
97
#define TCG_TARGET_HAS_deposit_i64 1
98
-#define TCG_TARGET_HAS_extract_i64 1
99
-#define TCG_TARGET_HAS_sextract_i64 1
100
#define TCG_TARGET_HAS_extract2_i64 0
101
#define TCG_TARGET_HAS_extr_i64_i32 1
102
#define TCG_TARGET_HAS_ext8s_i64 1
103
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
104
index XXXXXXX..XXXXXXX 100644
105
--- a/tcg/mips/tcg-target-has.h
106
+++ b/tcg/mips/tcg-target-has.h
107
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
108
109
/* optional instructions detected at runtime */
110
#define TCG_TARGET_HAS_deposit_i32 use_mips32r2_instructions
111
-#define TCG_TARGET_HAS_extract_i32 1
112
-#define TCG_TARGET_HAS_sextract_i32 1
113
#define TCG_TARGET_HAS_extract2_i32 0
114
#define TCG_TARGET_HAS_ext8s_i32 use_mips32r2_instructions
115
#define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions
116
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
117
#define TCG_TARGET_HAS_bswap32_i64 1
118
#define TCG_TARGET_HAS_bswap64_i64 1
119
#define TCG_TARGET_HAS_deposit_i64 use_mips32r2_instructions
120
-#define TCG_TARGET_HAS_extract_i64 1
121
-#define TCG_TARGET_HAS_sextract_i64 1
122
#define TCG_TARGET_HAS_extract2_i64 0
123
#define TCG_TARGET_HAS_ext8s_i64 use_mips32r2_instructions
124
#define TCG_TARGET_HAS_ext16s_i64 use_mips32r2_instructions
125
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
126
index XXXXXXX..XXXXXXX 100644
127
--- a/tcg/ppc/tcg-target-has.h
128
+++ b/tcg/ppc/tcg-target-has.h
129
@@ -XXX,XX +XXX,XX @@
130
#define TCG_TARGET_HAS_ctz_i32 have_isa_3_00
131
#define TCG_TARGET_HAS_ctpop_i32 have_isa_2_06
132
#define TCG_TARGET_HAS_deposit_i32 1
133
-#define TCG_TARGET_HAS_extract_i32 1
134
-#define TCG_TARGET_HAS_sextract_i32 1
135
#define TCG_TARGET_HAS_extract2_i32 0
136
#define TCG_TARGET_HAS_negsetcond_i32 1
137
#define TCG_TARGET_HAS_mulu2_i32 0
138
@@ -XXX,XX +XXX,XX @@
139
#define TCG_TARGET_HAS_ctz_i64 have_isa_3_00
140
#define TCG_TARGET_HAS_ctpop_i64 have_isa_2_06
141
#define TCG_TARGET_HAS_deposit_i64 1
142
-#define TCG_TARGET_HAS_extract_i64 1
143
-#define TCG_TARGET_HAS_sextract_i64 1
144
#define TCG_TARGET_HAS_extract2_i64 0
145
#define TCG_TARGET_HAS_negsetcond_i64 1
146
#define TCG_TARGET_HAS_add2_i64 1
147
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
148
index XXXXXXX..XXXXXXX 100644
149
--- a/tcg/riscv/tcg-target-has.h
150
+++ b/tcg/riscv/tcg-target-has.h
151
@@ -XXX,XX +XXX,XX @@
152
#define TCG_TARGET_HAS_div2_i32 0
153
#define TCG_TARGET_HAS_rot_i32 (cpuinfo & CPUINFO_ZBB)
154
#define TCG_TARGET_HAS_deposit_i32 0
155
-#define TCG_TARGET_HAS_extract_i32 1
156
-#define TCG_TARGET_HAS_sextract_i32 1
157
#define TCG_TARGET_HAS_extract2_i32 0
158
#define TCG_TARGET_HAS_add2_i32 1
159
#define TCG_TARGET_HAS_sub2_i32 1
160
@@ -XXX,XX +XXX,XX @@
161
#define TCG_TARGET_HAS_div2_i64 0
162
#define TCG_TARGET_HAS_rot_i64 (cpuinfo & CPUINFO_ZBB)
163
#define TCG_TARGET_HAS_deposit_i64 0
164
-#define TCG_TARGET_HAS_extract_i64 1
165
-#define TCG_TARGET_HAS_sextract_i64 1
166
#define TCG_TARGET_HAS_extract2_i64 0
167
#define TCG_TARGET_HAS_extr_i64_i32 1
168
#define TCG_TARGET_HAS_ext8s_i64 1
169
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
170
index XXXXXXX..XXXXXXX 100644
171
--- a/tcg/s390x/tcg-target-has.h
172
+++ b/tcg/s390x/tcg-target-has.h
173
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
174
#define TCG_TARGET_HAS_ctz_i32 0
175
#define TCG_TARGET_HAS_ctpop_i32 1
176
#define TCG_TARGET_HAS_deposit_i32 1
177
-#define TCG_TARGET_HAS_extract_i32 1
178
-#define TCG_TARGET_HAS_sextract_i32 1
179
#define TCG_TARGET_HAS_extract2_i32 0
180
#define TCG_TARGET_HAS_negsetcond_i32 1
181
#define TCG_TARGET_HAS_add2_i32 1
182
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
183
#define TCG_TARGET_HAS_ctz_i64 0
184
#define TCG_TARGET_HAS_ctpop_i64 1
185
#define TCG_TARGET_HAS_deposit_i64 1
186
-#define TCG_TARGET_HAS_extract_i64 1
187
-#define TCG_TARGET_HAS_sextract_i64 1
188
#define TCG_TARGET_HAS_extract2_i64 0
189
#define TCG_TARGET_HAS_negsetcond_i64 1
190
#define TCG_TARGET_HAS_add2_i64 1
191
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
192
index XXXXXXX..XXXXXXX 100644
193
--- a/tcg/sparc64/tcg-target-has.h
194
+++ b/tcg/sparc64/tcg-target-has.h
195
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
196
#define TCG_TARGET_HAS_ctz_i32 0
197
#define TCG_TARGET_HAS_ctpop_i32 0
198
#define TCG_TARGET_HAS_deposit_i32 0
199
-#define TCG_TARGET_HAS_extract_i32 1
200
-#define TCG_TARGET_HAS_sextract_i32 1
201
#define TCG_TARGET_HAS_extract2_i32 0
202
#define TCG_TARGET_HAS_negsetcond_i32 1
203
#define TCG_TARGET_HAS_add2_i32 1
204
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
205
#define TCG_TARGET_HAS_ctz_i64 0
206
#define TCG_TARGET_HAS_ctpop_i64 0
207
#define TCG_TARGET_HAS_deposit_i64 0
208
-#define TCG_TARGET_HAS_extract_i64 1
209
-#define TCG_TARGET_HAS_sextract_i64 1
210
#define TCG_TARGET_HAS_extract2_i64 0
211
#define TCG_TARGET_HAS_negsetcond_i64 1
212
#define TCG_TARGET_HAS_add2_i64 1
213
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
214
index XXXXXXX..XXXXXXX 100644
215
--- a/tcg/tcg-has.h
216
+++ b/tcg/tcg-has.h
217
@@ -XXX,XX +XXX,XX @@
218
#define TCG_TARGET_HAS_ctz_i64 0
219
#define TCG_TARGET_HAS_ctpop_i64 0
220
#define TCG_TARGET_HAS_deposit_i64 0
221
-#define TCG_TARGET_HAS_extract_i64 0
222
-#define TCG_TARGET_HAS_sextract_i64 0
223
#define TCG_TARGET_HAS_extract2_i64 0
224
#define TCG_TARGET_HAS_negsetcond_i64 0
225
#define TCG_TARGET_HAS_add2_i64 0
226
@@ -XXX,XX +XXX,XX @@
227
#ifndef TCG_TARGET_deposit_i64_valid
228
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
229
#endif
230
-#ifndef TCG_TARGET_extract_valid
231
-#define TCG_TARGET_extract_valid(type, ofs, len) \
232
- ((type) == TCG_TYPE_I32 ? TCG_TARGET_HAS_extract_i32 \
233
- : TCG_TARGET_HAS_extract_i64)
234
-#endif
235
-#ifndef TCG_TARGET_sextract_valid
236
-#define TCG_TARGET_sextract_valid(type, ofs, len) \
237
- ((type) == TCG_TYPE_I32 ? TCG_TARGET_HAS_sextract_i32 \
238
- : TCG_TARGET_HAS_sextract_i64)
239
-#endif
240
241
/* Only one of DIV or DIV2 should be defined. */
242
#if defined(TCG_TARGET_HAS_div_i32)
243
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
244
index XXXXXXX..XXXXXXX 100644
245
--- a/tcg/tci/tcg-target-has.h
246
+++ b/tcg/tci/tcg-target-has.h
247
@@ -XXX,XX +XXX,XX @@
248
#define TCG_TARGET_HAS_ext16u_i32 1
249
#define TCG_TARGET_HAS_andc_i32 1
250
#define TCG_TARGET_HAS_deposit_i32 1
251
-#define TCG_TARGET_HAS_extract_i32 1
252
-#define TCG_TARGET_HAS_sextract_i32 1
253
#define TCG_TARGET_HAS_extract2_i32 0
254
#define TCG_TARGET_HAS_eqv_i32 1
255
#define TCG_TARGET_HAS_nand_i32 1
256
@@ -XXX,XX +XXX,XX @@
257
#define TCG_TARGET_HAS_bswap32_i64 1
258
#define TCG_TARGET_HAS_bswap64_i64 1
259
#define TCG_TARGET_HAS_deposit_i64 1
260
-#define TCG_TARGET_HAS_extract_i64 1
261
-#define TCG_TARGET_HAS_sextract_i64 1
262
#define TCG_TARGET_HAS_extract2_i64 0
263
#define TCG_TARGET_HAS_div_i64 1
264
#define TCG_TARGET_HAS_rem_i64 1
265
diff --git a/tcg/optimize.c b/tcg/optimize.c
266
index XXXXXXX..XXXXXXX 100644
267
--- a/tcg/optimize.c
268
+++ b/tcg/optimize.c
269
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
270
shr_opc = INDEX_op_shr_i32;
271
neg_opc = INDEX_op_neg_i32;
272
if (TCG_TARGET_extract_valid(TCG_TYPE_I32, sh, 1)) {
273
- uext_opc = TCG_TARGET_HAS_extract_i32 ? INDEX_op_extract_i32 : 0;
274
+ uext_opc = INDEX_op_extract_i32;
275
}
276
if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, sh, 1)) {
277
- sext_opc = TCG_TARGET_HAS_sextract_i32 ? INDEX_op_sextract_i32 : 0;
278
+ sext_opc = INDEX_op_sextract_i32;
279
}
280
break;
281
case TCG_TYPE_I64:
282
@@ -XXX,XX +XXX,XX @@ static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
283
shr_opc = INDEX_op_shr_i64;
284
neg_opc = INDEX_op_neg_i64;
285
if (TCG_TARGET_extract_valid(TCG_TYPE_I64, sh, 1)) {
286
- uext_opc = TCG_TARGET_HAS_extract_i64 ? INDEX_op_extract_i64 : 0;
287
+ uext_opc = INDEX_op_extract_i64;
288
}
289
if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, sh, 1)) {
290
- sext_opc = TCG_TARGET_HAS_sextract_i64 ? INDEX_op_sextract_i64 : 0;
291
+ sext_opc = INDEX_op_sextract_i64;
292
}
293
break;
294
default:
295
diff --git a/tcg/tcg.c b/tcg/tcg.c
296
index XXXXXXX..XXXXXXX 100644
297
--- a/tcg/tcg.c
298
+++ b/tcg/tcg.c
299
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
300
case INDEX_op_shl_i32:
301
case INDEX_op_shr_i32:
302
case INDEX_op_sar_i32:
303
+ case INDEX_op_extract_i32:
304
+ case INDEX_op_sextract_i32:
305
return true;
306
307
case INDEX_op_negsetcond_i32:
308
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
309
return TCG_TARGET_HAS_rot_i32;
310
case INDEX_op_deposit_i32:
311
return TCG_TARGET_HAS_deposit_i32;
312
- case INDEX_op_extract_i32:
313
- return TCG_TARGET_HAS_extract_i32;
314
- case INDEX_op_sextract_i32:
315
- return TCG_TARGET_HAS_sextract_i32;
316
case INDEX_op_extract2_i32:
317
return TCG_TARGET_HAS_extract2_i32;
318
case INDEX_op_add2_i32:
319
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
320
case INDEX_op_sar_i64:
321
case INDEX_op_ext_i32_i64:
322
case INDEX_op_extu_i32_i64:
323
+ case INDEX_op_extract_i64:
324
+ case INDEX_op_sextract_i64:
325
return TCG_TARGET_REG_BITS == 64;
326
327
case INDEX_op_negsetcond_i64:
328
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
329
return TCG_TARGET_HAS_rot_i64;
330
case INDEX_op_deposit_i64:
331
return TCG_TARGET_HAS_deposit_i64;
332
- case INDEX_op_extract_i64:
333
- return TCG_TARGET_HAS_extract_i64;
334
- case INDEX_op_sextract_i64:
335
- return TCG_TARGET_HAS_sextract_i64;
336
case INDEX_op_extract2_i64:
337
return TCG_TARGET_HAS_extract2_i64;
338
case INDEX_op_extrl_i64_i32:
339
diff --git a/tcg/tci.c b/tcg/tci.c
340
index XXXXXXX..XXXXXXX 100644
341
--- a/tcg/tci.c
342
+++ b/tcg/tci.c
343
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
344
regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
345
break;
346
#endif
347
-#if TCG_TARGET_HAS_extract_i32
348
case INDEX_op_extract_i32:
349
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
350
regs[r0] = extract32(regs[r1], pos, len);
351
break;
352
-#endif
353
-#if TCG_TARGET_HAS_sextract_i32
354
case INDEX_op_sextract_i32:
355
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
356
regs[r0] = sextract32(regs[r1], pos, len);
357
break;
358
-#endif
359
case INDEX_op_brcond_i32:
360
tci_args_rl(insn, tb_ptr, &r0, &ptr);
361
if ((uint32_t)regs[r0]) {
362
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
363
regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
364
break;
365
#endif
366
-#if TCG_TARGET_HAS_extract_i64
367
case INDEX_op_extract_i64:
368
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
369
regs[r0] = extract64(regs[r1], pos, len);
370
break;
371
-#endif
372
-#if TCG_TARGET_HAS_sextract_i64
373
case INDEX_op_sextract_i64:
374
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
375
regs[r0] = sextract64(regs[r1], pos, len);
376
break;
377
-#endif
378
case INDEX_op_brcond_i64:
379
tci_args_rl(insn, tb_ptr, &r0, &ptr);
380
if (regs[r0]) {
37
--
381
--
38
2.34.1
382
2.43.0
39
383
40
384
diff view generated by jsdifflib
1
The value previously chosen overlaps GUSA_MASK.
1
Make deposit "unconditional" in the sense that the opcode is
2
always present. Rely instead on TCG_TARGET_deposit_valid,
3
now always defined.
2
4
3
Rename all DELAY_SLOT_* and GUSA_* defines to emphasize
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
that they are included in TB_FLAGs. Add aliases for the
5
FPSCR and SR bits that are included in TB_FLAGS, so that
6
we don't accidentally reassign those bits.
7
8
Fixes: 4da06fb3062 ("target/sh4: Implement prctl_unalign_sigbus")
9
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/856
10
Reviewed-by: Yoshinori Sato <ysato@users.sourceforge.jp>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
12
---
7
---
13
target/sh4/cpu.h | 56 +++++++++++++------------
8
tcg/aarch64/tcg-target-has.h | 3 +--
14
linux-user/sh4/signal.c | 6 +--
9
tcg/arm/tcg-target-has.h | 2 +-
15
target/sh4/cpu.c | 6 +--
10
tcg/i386/tcg-target-has.h | 5 +----
16
target/sh4/helper.c | 6 +--
11
tcg/loongarch64/tcg-target-has.h | 3 +--
17
target/sh4/translate.c | 90 ++++++++++++++++++++++-------------------
12
tcg/mips/tcg-target-has.h | 3 +--
18
5 files changed, 88 insertions(+), 76 deletions(-)
13
tcg/ppc/tcg-target-has.h | 3 +--
14
tcg/riscv/tcg-target-has.h | 4 ++--
15
tcg/s390x/tcg-target-has.h | 3 +--
16
tcg/sparc64/tcg-target-has.h | 4 ++--
17
tcg/tcg-has.h | 8 --------
18
tcg/tci/tcg-target-has.h | 3 +--
19
tcg/tcg-op.c | 22 +++++++++++-----------
20
tcg/tcg.c | 31 +++++++++++--------------------
21
tcg/tci.c | 4 ----
22
tcg/tci/tcg-target.c.inc | 2 +-
23
15 files changed, 35 insertions(+), 65 deletions(-)
19
24
20
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
25
diff --git a/tcg/aarch64/tcg-target-has.h b/tcg/aarch64/tcg-target-has.h
21
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
22
--- a/target/sh4/cpu.h
27
--- a/tcg/aarch64/tcg-target-has.h
23
+++ b/target/sh4/cpu.h
28
+++ b/tcg/aarch64/tcg-target-has.h
24
@@ -XXX,XX +XXX,XX @@
29
@@ -XXX,XX +XXX,XX @@
25
#define FPSCR_RM_NEAREST (0 << 0)
30
#define TCG_TARGET_HAS_clz_i32 1
26
#define FPSCR_RM_ZERO (1 << 0)
31
#define TCG_TARGET_HAS_ctz_i32 1
27
32
#define TCG_TARGET_HAS_ctpop_i32 0
28
-#define DELAY_SLOT_MASK 0x7
33
-#define TCG_TARGET_HAS_deposit_i32 1
29
-#define DELAY_SLOT (1 << 0)
34
#define TCG_TARGET_HAS_extract2_i32 1
30
-#define DELAY_SLOT_CONDITIONAL (1 << 1)
35
#define TCG_TARGET_HAS_negsetcond_i32 1
31
-#define DELAY_SLOT_RTE (1 << 2)
36
#define TCG_TARGET_HAS_add2_i32 1
32
+#define TB_FLAG_DELAY_SLOT (1 << 0)
37
@@ -XXX,XX +XXX,XX @@
33
+#define TB_FLAG_DELAY_SLOT_COND (1 << 1)
38
#define TCG_TARGET_HAS_clz_i64 1
34
+#define TB_FLAG_DELAY_SLOT_RTE (1 << 2)
39
#define TCG_TARGET_HAS_ctz_i64 1
35
+#define TB_FLAG_PENDING_MOVCA (1 << 3)
40
#define TCG_TARGET_HAS_ctpop_i64 0
36
+#define TB_FLAG_GUSA_SHIFT 4 /* [11:4] */
41
-#define TCG_TARGET_HAS_deposit_i64 1
37
+#define TB_FLAG_GUSA_EXCLUSIVE (1 << 12)
42
#define TCG_TARGET_HAS_extract2_i64 1
38
+#define TB_FLAG_UNALIGN (1 << 13)
43
#define TCG_TARGET_HAS_negsetcond_i64 1
39
+#define TB_FLAG_SR_FD (1 << SR_FD) /* 15 */
44
#define TCG_TARGET_HAS_add2_i64 1
40
+#define TB_FLAG_FPSCR_PR FPSCR_PR /* 19 */
45
@@ -XXX,XX +XXX,XX @@
41
+#define TB_FLAG_FPSCR_SZ FPSCR_SZ /* 20 */
46
42
+#define TB_FLAG_FPSCR_FR FPSCR_FR /* 21 */
47
#define TCG_TARGET_extract_valid(type, ofs, len) 1
43
+#define TB_FLAG_SR_RB (1 << SR_RB) /* 29 */
48
#define TCG_TARGET_sextract_valid(type, ofs, len) 1
44
+#define TB_FLAG_SR_MD (1 << SR_MD) /* 30 */
49
+#define TCG_TARGET_deposit_valid(type, ofs, len) 1
45
50
46
-#define TB_FLAG_PENDING_MOVCA (1 << 3)
51
#endif
47
-#define TB_FLAG_UNALIGN (1 << 4)
52
diff --git a/tcg/arm/tcg-target-has.h b/tcg/arm/tcg-target-has.h
48
-
53
index XXXXXXX..XXXXXXX 100644
49
-#define GUSA_SHIFT 4
54
--- a/tcg/arm/tcg-target-has.h
50
-#ifdef CONFIG_USER_ONLY
55
+++ b/tcg/arm/tcg-target-has.h
51
-#define GUSA_EXCLUSIVE (1 << 12)
56
@@ -XXX,XX +XXX,XX @@ extern bool use_neon_instructions;
52
-#define GUSA_MASK ((0xff << GUSA_SHIFT) | GUSA_EXCLUSIVE)
57
#define TCG_TARGET_HAS_clz_i32 1
53
-#else
58
#define TCG_TARGET_HAS_ctz_i32 use_armv7_instructions
54
-/* Provide dummy versions of the above to allow tests against tbflags
59
#define TCG_TARGET_HAS_ctpop_i32 0
55
- to be elided while avoiding ifdefs. */
60
-#define TCG_TARGET_HAS_deposit_i32 use_armv7_instructions
56
-#define GUSA_EXCLUSIVE 0
61
#define TCG_TARGET_HAS_extract2_i32 1
57
-#define GUSA_MASK 0
62
#define TCG_TARGET_HAS_negsetcond_i32 1
63
#define TCG_TARGET_HAS_mulu2_i32 1
64
@@ -XXX,XX +XXX,XX @@ tcg_target_extract_valid(TCGType type, unsigned ofs, unsigned len)
65
66
#define TCG_TARGET_extract_valid tcg_target_extract_valid
67
#define TCG_TARGET_sextract_valid tcg_target_extract_valid
68
+#define TCG_TARGET_deposit_valid(type, ofs, len) use_armv7_instructions
69
70
#endif
71
diff --git a/tcg/i386/tcg-target-has.h b/tcg/i386/tcg-target-has.h
72
index XXXXXXX..XXXXXXX 100644
73
--- a/tcg/i386/tcg-target-has.h
74
+++ b/tcg/i386/tcg-target-has.h
75
@@ -XXX,XX +XXX,XX @@
76
#define TCG_TARGET_HAS_clz_i32 1
77
#define TCG_TARGET_HAS_ctz_i32 1
78
#define TCG_TARGET_HAS_ctpop_i32 have_popcnt
79
-#define TCG_TARGET_HAS_deposit_i32 1
80
#define TCG_TARGET_HAS_extract2_i32 1
81
#define TCG_TARGET_HAS_negsetcond_i32 1
82
#define TCG_TARGET_HAS_add2_i32 1
83
@@ -XXX,XX +XXX,XX @@
84
#define TCG_TARGET_HAS_clz_i64 1
85
#define TCG_TARGET_HAS_ctz_i64 1
86
#define TCG_TARGET_HAS_ctpop_i64 have_popcnt
87
-#define TCG_TARGET_HAS_deposit_i64 1
88
#define TCG_TARGET_HAS_extract2_i64 1
89
#define TCG_TARGET_HAS_negsetcond_i64 1
90
#define TCG_TARGET_HAS_add2_i64 1
91
@@ -XXX,XX +XXX,XX @@
92
#define TCG_TARGET_HAS_cmpsel_vec 1
93
#define TCG_TARGET_HAS_tst_vec have_avx512bw
94
95
-#define TCG_TARGET_deposit_i32_valid(ofs, len) \
96
+#define TCG_TARGET_deposit_valid(type, ofs, len) \
97
(((ofs) == 0 && ((len) == 8 || (len) == 16)) || \
98
(TCG_TARGET_REG_BITS == 32 && (ofs) == 8 && (len) == 8))
99
-#define TCG_TARGET_deposit_i64_valid TCG_TARGET_deposit_i32_valid
100
101
/*
102
* Check for the possibility of low byte/word extraction, high-byte extraction
103
diff --git a/tcg/loongarch64/tcg-target-has.h b/tcg/loongarch64/tcg-target-has.h
104
index XXXXXXX..XXXXXXX 100644
105
--- a/tcg/loongarch64/tcg-target-has.h
106
+++ b/tcg/loongarch64/tcg-target-has.h
107
@@ -XXX,XX +XXX,XX @@
108
#define TCG_TARGET_HAS_rem_i32 1
109
#define TCG_TARGET_HAS_div2_i32 0
110
#define TCG_TARGET_HAS_rot_i32 1
111
-#define TCG_TARGET_HAS_deposit_i32 1
112
#define TCG_TARGET_HAS_extract2_i32 0
113
#define TCG_TARGET_HAS_add2_i32 0
114
#define TCG_TARGET_HAS_sub2_i32 0
115
@@ -XXX,XX +XXX,XX @@
116
#define TCG_TARGET_HAS_rem_i64 1
117
#define TCG_TARGET_HAS_div2_i64 0
118
#define TCG_TARGET_HAS_rot_i64 1
119
-#define TCG_TARGET_HAS_deposit_i64 1
120
#define TCG_TARGET_HAS_extract2_i64 0
121
#define TCG_TARGET_HAS_extr_i64_i32 1
122
#define TCG_TARGET_HAS_ext8s_i64 1
123
@@ -XXX,XX +XXX,XX @@
124
#define TCG_TARGET_HAS_tst_vec 0
125
126
#define TCG_TARGET_extract_valid(type, ofs, len) 1
127
+#define TCG_TARGET_deposit_valid(type, ofs, len) 1
128
129
static inline bool
130
tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
131
diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h
132
index XXXXXXX..XXXXXXX 100644
133
--- a/tcg/mips/tcg-target-has.h
134
+++ b/tcg/mips/tcg-target-has.h
135
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
136
#endif
137
138
/* optional instructions detected at runtime */
139
-#define TCG_TARGET_HAS_deposit_i32 use_mips32r2_instructions
140
#define TCG_TARGET_HAS_extract2_i32 0
141
#define TCG_TARGET_HAS_ext8s_i32 use_mips32r2_instructions
142
#define TCG_TARGET_HAS_ext16s_i32 use_mips32r2_instructions
143
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
144
#define TCG_TARGET_HAS_bswap16_i64 1
145
#define TCG_TARGET_HAS_bswap32_i64 1
146
#define TCG_TARGET_HAS_bswap64_i64 1
147
-#define TCG_TARGET_HAS_deposit_i64 use_mips32r2_instructions
148
#define TCG_TARGET_HAS_extract2_i64 0
149
#define TCG_TARGET_HAS_ext8s_i64 use_mips32r2_instructions
150
#define TCG_TARGET_HAS_ext16s_i64 use_mips32r2_instructions
151
@@ -XXX,XX +XXX,XX @@ extern bool use_mips32r2_instructions;
152
#define TCG_TARGET_HAS_tst 0
153
154
#define TCG_TARGET_extract_valid(type, ofs, len) use_mips32r2_instructions
155
+#define TCG_TARGET_deposit_valid(type, ofs, len) use_mips32r2_instructions
156
157
static inline bool
158
tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
159
diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h
160
index XXXXXXX..XXXXXXX 100644
161
--- a/tcg/ppc/tcg-target-has.h
162
+++ b/tcg/ppc/tcg-target-has.h
163
@@ -XXX,XX +XXX,XX @@
164
#define TCG_TARGET_HAS_clz_i32 1
165
#define TCG_TARGET_HAS_ctz_i32 have_isa_3_00
166
#define TCG_TARGET_HAS_ctpop_i32 have_isa_2_06
167
-#define TCG_TARGET_HAS_deposit_i32 1
168
#define TCG_TARGET_HAS_extract2_i32 0
169
#define TCG_TARGET_HAS_negsetcond_i32 1
170
#define TCG_TARGET_HAS_mulu2_i32 0
171
@@ -XXX,XX +XXX,XX @@
172
#define TCG_TARGET_HAS_clz_i64 1
173
#define TCG_TARGET_HAS_ctz_i64 have_isa_3_00
174
#define TCG_TARGET_HAS_ctpop_i64 have_isa_2_06
175
-#define TCG_TARGET_HAS_deposit_i64 1
176
#define TCG_TARGET_HAS_extract2_i64 0
177
#define TCG_TARGET_HAS_negsetcond_i64 1
178
#define TCG_TARGET_HAS_add2_i64 1
179
@@ -XXX,XX +XXX,XX @@
180
#define TCG_TARGET_HAS_tst_vec 0
181
182
#define TCG_TARGET_extract_valid(type, ofs, len) 1
183
+#define TCG_TARGET_deposit_valid(type, ofs, len) 1
184
185
static inline bool
186
tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
187
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
188
index XXXXXXX..XXXXXXX 100644
189
--- a/tcg/riscv/tcg-target-has.h
190
+++ b/tcg/riscv/tcg-target-has.h
191
@@ -XXX,XX +XXX,XX @@
192
#define TCG_TARGET_HAS_rem_i32 1
193
#define TCG_TARGET_HAS_div2_i32 0
194
#define TCG_TARGET_HAS_rot_i32 (cpuinfo & CPUINFO_ZBB)
195
-#define TCG_TARGET_HAS_deposit_i32 0
196
#define TCG_TARGET_HAS_extract2_i32 0
197
#define TCG_TARGET_HAS_add2_i32 1
198
#define TCG_TARGET_HAS_sub2_i32 1
199
@@ -XXX,XX +XXX,XX @@
200
#define TCG_TARGET_HAS_rem_i64 1
201
#define TCG_TARGET_HAS_div2_i64 0
202
#define TCG_TARGET_HAS_rot_i64 (cpuinfo & CPUINFO_ZBB)
203
-#define TCG_TARGET_HAS_deposit_i64 0
204
#define TCG_TARGET_HAS_extract2_i64 0
205
#define TCG_TARGET_HAS_extr_i64_i32 1
206
#define TCG_TARGET_HAS_ext8s_i64 1
207
@@ -XXX,XX +XXX,XX @@ tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
208
}
209
#define TCG_TARGET_sextract_valid tcg_target_sextract_valid
210
211
+#define TCG_TARGET_deposit_valid(type, ofs, len) 0
212
+
213
#endif
214
diff --git a/tcg/s390x/tcg-target-has.h b/tcg/s390x/tcg-target-has.h
215
index XXXXXXX..XXXXXXX 100644
216
--- a/tcg/s390x/tcg-target-has.h
217
+++ b/tcg/s390x/tcg-target-has.h
218
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
219
#define TCG_TARGET_HAS_clz_i32 0
220
#define TCG_TARGET_HAS_ctz_i32 0
221
#define TCG_TARGET_HAS_ctpop_i32 1
222
-#define TCG_TARGET_HAS_deposit_i32 1
223
#define TCG_TARGET_HAS_extract2_i32 0
224
#define TCG_TARGET_HAS_negsetcond_i32 1
225
#define TCG_TARGET_HAS_add2_i32 1
226
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
227
#define TCG_TARGET_HAS_clz_i64 1
228
#define TCG_TARGET_HAS_ctz_i64 0
229
#define TCG_TARGET_HAS_ctpop_i64 1
230
-#define TCG_TARGET_HAS_deposit_i64 1
231
#define TCG_TARGET_HAS_extract2_i64 0
232
#define TCG_TARGET_HAS_negsetcond_i64 1
233
#define TCG_TARGET_HAS_add2_i64 1
234
@@ -XXX,XX +XXX,XX @@ extern uint64_t s390_facilities[3];
235
#define TCG_TARGET_HAS_tst_vec 0
236
237
#define TCG_TARGET_extract_valid(type, ofs, len) 1
238
+#define TCG_TARGET_deposit_valid(type, ofs, len) 1
239
240
static inline bool
241
tcg_target_sextract_valid(TCGType type, unsigned ofs, unsigned len)
242
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
243
index XXXXXXX..XXXXXXX 100644
244
--- a/tcg/sparc64/tcg-target-has.h
245
+++ b/tcg/sparc64/tcg-target-has.h
246
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
247
#define TCG_TARGET_HAS_clz_i32 0
248
#define TCG_TARGET_HAS_ctz_i32 0
249
#define TCG_TARGET_HAS_ctpop_i32 0
250
-#define TCG_TARGET_HAS_deposit_i32 0
251
#define TCG_TARGET_HAS_extract2_i32 0
252
#define TCG_TARGET_HAS_negsetcond_i32 1
253
#define TCG_TARGET_HAS_add2_i32 1
254
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
255
#define TCG_TARGET_HAS_clz_i64 0
256
#define TCG_TARGET_HAS_ctz_i64 0
257
#define TCG_TARGET_HAS_ctpop_i64 0
258
-#define TCG_TARGET_HAS_deposit_i64 0
259
#define TCG_TARGET_HAS_extract2_i64 0
260
#define TCG_TARGET_HAS_negsetcond_i64 1
261
#define TCG_TARGET_HAS_add2_i64 1
262
@@ -XXX,XX +XXX,XX @@ extern bool use_vis3_instructions;
263
264
#define TCG_TARGET_sextract_valid TCG_TARGET_extract_valid
265
266
+#define TCG_TARGET_deposit_valid(type, ofs, len) 0
267
+
268
#endif
269
diff --git a/tcg/tcg-has.h b/tcg/tcg-has.h
270
index XXXXXXX..XXXXXXX 100644
271
--- a/tcg/tcg-has.h
272
+++ b/tcg/tcg-has.h
273
@@ -XXX,XX +XXX,XX @@
274
#define TCG_TARGET_HAS_clz_i64 0
275
#define TCG_TARGET_HAS_ctz_i64 0
276
#define TCG_TARGET_HAS_ctpop_i64 0
277
-#define TCG_TARGET_HAS_deposit_i64 0
278
#define TCG_TARGET_HAS_extract2_i64 0
279
#define TCG_TARGET_HAS_negsetcond_i64 0
280
#define TCG_TARGET_HAS_add2_i64 0
281
@@ -XXX,XX +XXX,XX @@
282
#define TCG_TARGET_HAS_sub2_i32 1
283
#endif
284
285
-#ifndef TCG_TARGET_deposit_i32_valid
286
-#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
287
-#endif
288
-#ifndef TCG_TARGET_deposit_i64_valid
289
-#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
58
-#endif
290
-#endif
59
-
291
-
60
-#define TB_FLAG_ENVFLAGS_MASK (DELAY_SLOT_MASK | GUSA_MASK)
292
/* Only one of DIV or DIV2 should be defined. */
61
+#define TB_FLAG_DELAY_SLOT_MASK (TB_FLAG_DELAY_SLOT | \
293
#if defined(TCG_TARGET_HAS_div_i32)
62
+ TB_FLAG_DELAY_SLOT_COND | \
294
#define TCG_TARGET_HAS_div2_i32 0
63
+ TB_FLAG_DELAY_SLOT_RTE)
295
diff --git a/tcg/tci/tcg-target-has.h b/tcg/tci/tcg-target-has.h
64
+#define TB_FLAG_GUSA_MASK ((0xff << TB_FLAG_GUSA_SHIFT) | \
296
index XXXXXXX..XXXXXXX 100644
65
+ TB_FLAG_GUSA_EXCLUSIVE)
297
--- a/tcg/tci/tcg-target-has.h
66
+#define TB_FLAG_FPSCR_MASK (TB_FLAG_FPSCR_PR | \
298
+++ b/tcg/tci/tcg-target-has.h
67
+ TB_FLAG_FPSCR_SZ | \
299
@@ -XXX,XX +XXX,XX @@
68
+ TB_FLAG_FPSCR_FR)
300
#define TCG_TARGET_HAS_ext8u_i32 1
69
+#define TB_FLAG_SR_MASK (TB_FLAG_SR_FD | \
301
#define TCG_TARGET_HAS_ext16u_i32 1
70
+ TB_FLAG_SR_RB | \
302
#define TCG_TARGET_HAS_andc_i32 1
71
+ TB_FLAG_SR_MD)
303
-#define TCG_TARGET_HAS_deposit_i32 1
72
+#define TB_FLAG_ENVFLAGS_MASK (TB_FLAG_DELAY_SLOT_MASK | \
304
#define TCG_TARGET_HAS_extract2_i32 0
73
+ TB_FLAG_GUSA_MASK)
305
#define TCG_TARGET_HAS_eqv_i32 1
74
306
#define TCG_TARGET_HAS_nand_i32 1
75
typedef struct tlb_t {
307
@@ -XXX,XX +XXX,XX @@
76
uint32_t vpn;        /* virtual page number */
308
#define TCG_TARGET_HAS_bswap16_i64 1
77
@@ -XXX,XX +XXX,XX @@ static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
309
#define TCG_TARGET_HAS_bswap32_i64 1
310
#define TCG_TARGET_HAS_bswap64_i64 1
311
-#define TCG_TARGET_HAS_deposit_i64 1
312
#define TCG_TARGET_HAS_extract2_i64 0
313
#define TCG_TARGET_HAS_div_i64 1
314
#define TCG_TARGET_HAS_rem_i64 1
315
@@ -XXX,XX +XXX,XX @@
316
317
#define TCG_TARGET_extract_valid(type, ofs, len) 1
318
#define TCG_TARGET_sextract_valid(type, ofs, len) 1
319
+#define TCG_TARGET_deposit_valid(type, ofs, len) 1
320
321
#endif
322
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
323
index XXXXXXX..XXXXXXX 100644
324
--- a/tcg/tcg-op.c
325
+++ b/tcg/tcg-op.c
326
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
327
tcg_gen_mov_i32(ret, arg2);
328
return;
329
}
330
- if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) {
331
+ if (TCG_TARGET_deposit_valid(TCG_TYPE_I32, ofs, len)) {
332
tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len);
333
return;
334
}
335
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
336
tcg_gen_shli_i32(ret, arg, ofs);
337
} else if (ofs == 0) {
338
tcg_gen_andi_i32(ret, arg, (1u << len) - 1);
339
- } else if (TCG_TARGET_HAS_deposit_i32
340
- && TCG_TARGET_deposit_i32_valid(ofs, len)) {
341
+ } else if (TCG_TARGET_deposit_valid(TCG_TYPE_I32, ofs, len)) {
342
TCGv_i32 zero = tcg_constant_i32(0);
343
tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, zero, arg, ofs, len);
344
} else {
345
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
346
tcg_gen_mov_i64(ret, arg2);
347
return;
348
}
349
- if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) {
350
- tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len);
351
- return;
352
- }
353
354
- if (TCG_TARGET_REG_BITS == 32) {
355
+ if (TCG_TARGET_REG_BITS == 64) {
356
+ if (TCG_TARGET_deposit_valid(TCG_TYPE_I64, ofs, len)) {
357
+ tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len);
358
+ return;
359
+ }
360
+ } else {
361
if (ofs >= 32) {
362
tcg_gen_deposit_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1),
363
TCGV_LOW(arg2), ofs - 32, len);
364
@@ -XXX,XX +XXX,XX @@ void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
365
tcg_gen_shli_i64(ret, arg, ofs);
366
} else if (ofs == 0) {
367
tcg_gen_andi_i64(ret, arg, (1ull << len) - 1);
368
- } else if (TCG_TARGET_HAS_deposit_i64
369
- && TCG_TARGET_deposit_i64_valid(ofs, len)) {
370
+ } else if (TCG_TARGET_REG_BITS == 64 &&
371
+ TCG_TARGET_deposit_valid(TCG_TYPE_I64, ofs, len)) {
372
TCGv_i64 zero = tcg_constant_i64(0);
373
tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, zero, arg, ofs, len);
374
} else {
375
@@ -XXX,XX +XXX,XX @@ void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high)
376
tcg_gen_extu_i32_i64(dest, low);
377
/* If deposit is available, use it. Otherwise use the extra
378
knowledge that we have of the zero-extensions above. */
379
- if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(32, 32)) {
380
+ if (TCG_TARGET_deposit_valid(TCG_TYPE_I64, 32, 32)) {
381
tcg_gen_deposit_i64(dest, dest, tmp, 32, 32);
382
} else {
383
tcg_gen_shli_i64(tmp, tmp, 32);
384
diff --git a/tcg/tcg.c b/tcg/tcg.c
385
index XXXXXXX..XXXXXXX 100644
386
--- a/tcg/tcg.c
387
+++ b/tcg/tcg.c
388
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
389
case INDEX_op_sar_i32:
390
case INDEX_op_extract_i32:
391
case INDEX_op_sextract_i32:
392
+ case INDEX_op_deposit_i32:
393
return true;
394
395
case INDEX_op_negsetcond_i32:
396
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
397
case INDEX_op_rotl_i32:
398
case INDEX_op_rotr_i32:
399
return TCG_TARGET_HAS_rot_i32;
400
- case INDEX_op_deposit_i32:
401
- return TCG_TARGET_HAS_deposit_i32;
402
case INDEX_op_extract2_i32:
403
return TCG_TARGET_HAS_extract2_i32;
404
case INDEX_op_add2_i32:
405
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
406
case INDEX_op_extu_i32_i64:
407
case INDEX_op_extract_i64:
408
case INDEX_op_sextract_i64:
409
+ case INDEX_op_deposit_i64:
410
return TCG_TARGET_REG_BITS == 64;
411
412
case INDEX_op_negsetcond_i64:
413
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
414
case INDEX_op_rotl_i64:
415
case INDEX_op_rotr_i64:
416
return TCG_TARGET_HAS_rot_i64;
417
- case INDEX_op_deposit_i64:
418
- return TCG_TARGET_HAS_deposit_i64;
419
case INDEX_op_extract2_i64:
420
return TCG_TARGET_HAS_extract2_i64;
421
case INDEX_op_extrl_i64_i32:
422
@@ -XXX,XX +XXX,XX @@ bool tcg_op_supported(TCGOpcode op, TCGType type, unsigned flags)
423
424
bool tcg_op_deposit_valid(TCGType type, unsigned ofs, unsigned len)
78
{
425
{
79
/* The instruction in a RTE delay slot is fetched in privileged
426
+ unsigned width;
80
mode, but executed in user mode. */
427
+
81
- if (ifetch && (env->flags & DELAY_SLOT_RTE)) {
428
+ tcg_debug_assert(type == TCG_TYPE_I32 || type == TCG_TYPE_I64);
82
+ if (ifetch && (env->flags & TB_FLAG_DELAY_SLOT_RTE)) {
429
+ width = (type == TCG_TYPE_I32 ? 32 : 64);
83
return 0;
430
+
84
} else {
431
+ tcg_debug_assert(ofs < width);
85
return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
432
tcg_debug_assert(len > 0);
86
@@ -XXX,XX +XXX,XX @@ static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
433
- switch (type) {
87
{
434
- case TCG_TYPE_I32:
88
*pc = env->pc;
435
- tcg_debug_assert(ofs < 32);
89
/* For a gUSA region, notice the end of the region. */
436
- tcg_debug_assert(len <= 32);
90
- *cs_base = env->flags & GUSA_MASK ? env->gregs[0] : 0;
437
- tcg_debug_assert(ofs + len <= 32);
91
- *flags = env->flags /* TB_FLAG_ENVFLAGS_MASK: bits 0-2, 4-12 */
438
- return TCG_TARGET_HAS_deposit_i32 &&
92
- | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
439
- TCG_TARGET_deposit_i32_valid(ofs, len);
93
- | (env->sr & ((1u << SR_MD) | (1u << SR_RB))) /* Bits 29-30 */
440
- case TCG_TYPE_I64:
94
- | (env->sr & (1u << SR_FD)) /* Bit 15 */
441
- tcg_debug_assert(ofs < 64);
95
+ *cs_base = env->flags & TB_FLAG_GUSA_MASK ? env->gregs[0] : 0;
442
- tcg_debug_assert(len <= 64);
96
+ *flags = env->flags
443
- tcg_debug_assert(ofs + len <= 64);
97
+ | (env->fpscr & TB_FLAG_FPSCR_MASK)
444
- return TCG_TARGET_HAS_deposit_i64 &&
98
+ | (env->sr & TB_FLAG_SR_MASK)
445
- TCG_TARGET_deposit_i64_valid(ofs, len);
99
| (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */
446
- default:
100
#ifdef CONFIG_USER_ONLY
447
- g_assert_not_reached();
101
*flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
448
- }
102
diff --git a/linux-user/sh4/signal.c b/linux-user/sh4/signal.c
449
+ tcg_debug_assert(len <= width - ofs);
103
index XXXXXXX..XXXXXXX 100644
450
+
104
--- a/linux-user/sh4/signal.c
451
+ return TCG_TARGET_deposit_valid(type, ofs, len);
105
+++ b/linux-user/sh4/signal.c
106
@@ -XXX,XX +XXX,XX @@ static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
107
__get_user(regs->fpul, &sc->sc_fpul);
108
109
regs->tra = -1; /* disable syscall checks */
110
- regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
111
+ regs->flags = 0;
112
}
452
}
113
453
114
void setup_frame(int sig, struct target_sigaction *ka,
454
static TCGOp *tcg_op_alloc(TCGOpcode opc, unsigned nargs);
115
@@ -XXX,XX +XXX,XX @@ void setup_frame(int sig, struct target_sigaction *ka,
455
diff --git a/tcg/tci.c b/tcg/tci.c
116
regs->gregs[5] = 0;
456
index XXXXXXX..XXXXXXX 100644
117
regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc);
457
--- a/tcg/tci.c
118
regs->pc = (unsigned long) ka->_sa_handler;
458
+++ b/tcg/tci.c
119
- regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
459
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
120
+ regs->flags &= ~(TB_FLAG_DELAY_SLOT_MASK | TB_FLAG_GUSA_MASK);
460
regs[r0] = ror32(regs[r1], regs[r2] & 31);
121
461
break;
122
unlock_user_struct(frame, frame_addr, 1);
462
#endif
123
return;
463
-#if TCG_TARGET_HAS_deposit_i32
124
@@ -XXX,XX +XXX,XX @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
464
case INDEX_op_deposit_i32:
125
regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
465
tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
126
regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
466
regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
127
regs->pc = (unsigned long) ka->_sa_handler;
467
break;
128
- regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
468
-#endif
129
+ regs->flags &= ~(TB_FLAG_DELAY_SLOT_MASK | TB_FLAG_GUSA_MASK);
469
case INDEX_op_extract_i32:
130
470
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
131
unlock_user_struct(frame, frame_addr, 1);
471
regs[r0] = extract32(regs[r1], pos, len);
132
return;
472
@@ -XXX,XX +XXX,XX @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
133
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
473
regs[r0] = ror64(regs[r1], regs[r2] & 63);
134
index XXXXXXX..XXXXXXX 100644
474
break;
135
--- a/target/sh4/cpu.c
475
#endif
136
+++ b/target/sh4/cpu.c
476
-#if TCG_TARGET_HAS_deposit_i64
137
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_synchronize_from_tb(CPUState *cs,
477
case INDEX_op_deposit_i64:
138
SuperHCPU *cpu = SUPERH_CPU(cs);
478
tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
139
479
regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
140
cpu->env.pc = tb_pc(tb);
480
break;
141
- cpu->env.flags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
481
-#endif
142
+ cpu->env.flags = tb->flags;
482
case INDEX_op_extract_i64:
143
}
483
tci_args_rrbb(insn, &r0, &r1, &pos, &len);
144
484
regs[r0] = extract64(regs[r1], pos, len);
145
#ifndef CONFIG_USER_ONLY
485
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
146
@@ -XXX,XX +XXX,XX @@ static bool superh_io_recompile_replay_branch(CPUState *cs,
486
index XXXXXXX..XXXXXXX 100644
147
SuperHCPU *cpu = SUPERH_CPU(cs);
487
--- a/tcg/tci/tcg-target.c.inc
148
CPUSH4State *env = &cpu->env;
488
+++ b/tcg/tci/tcg-target.c.inc
149
489
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
150
- if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
490
tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
151
+ if ((env->flags & (TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND))
491
break;
152
&& env->pc != tb_pc(tb)) {
492
153
env->pc -= 2;
493
- CASE_32_64(deposit) /* Optional (TCG_TARGET_HAS_deposit_*). */
154
- env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
494
+ CASE_32_64(deposit)
155
+ env->flags &= ~(TB_FLAG_DELAY_SLOT | TB_FLAG_DELAY_SLOT_COND);
495
tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], args[3], args[4]);
156
return true;
496
break;
157
}
497
158
return false;
159
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
160
index XXXXXXX..XXXXXXX 100644
161
--- a/target/sh4/helper.c
162
+++ b/target/sh4/helper.c
163
@@ -XXX,XX +XXX,XX @@ void superh_cpu_do_interrupt(CPUState *cs)
164
env->sr |= (1u << SR_BL) | (1u << SR_MD) | (1u << SR_RB);
165
env->lock_addr = -1;
166
167
- if (env->flags & DELAY_SLOT_MASK) {
168
+ if (env->flags & TB_FLAG_DELAY_SLOT_MASK) {
169
/* Branch instruction should be executed again before delay slot. */
170
    env->spc -= 2;
171
    /* Clear flags for exception/interrupt routine. */
172
- env->flags &= ~DELAY_SLOT_MASK;
173
+ env->flags &= ~TB_FLAG_DELAY_SLOT_MASK;
174
}
175
176
if (do_exp) {
177
@@ -XXX,XX +XXX,XX @@ bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
178
CPUSH4State *env = &cpu->env;
179
180
/* Delay slots are indivisible, ignore interrupts */
181
- if (env->flags & DELAY_SLOT_MASK) {
182
+ if (env->flags & TB_FLAG_DELAY_SLOT_MASK) {
183
return false;
184
} else {
185
superh_cpu_do_interrupt(cs);
186
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
187
index XXXXXXX..XXXXXXX 100644
188
--- a/target/sh4/translate.c
189
+++ b/target/sh4/translate.c
190
@@ -XXX,XX +XXX,XX @@ void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
191
         i, env->gregs[i], i + 1, env->gregs[i + 1],
192
         i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
193
}
194
- if (env->flags & DELAY_SLOT) {
195
+ if (env->flags & TB_FLAG_DELAY_SLOT) {
196
qemu_printf("in delay slot (delayed_pc=0x%08x)\n",
197
         env->delayed_pc);
198
- } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
199
+ } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) {
200
qemu_printf("in conditional delay slot (delayed_pc=0x%08x)\n",
201
         env->delayed_pc);
202
- } else if (env->flags & DELAY_SLOT_RTE) {
203
+ } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) {
204
qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
205
env->delayed_pc);
206
}
207
@@ -XXX,XX +XXX,XX @@ static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
208
209
static inline bool use_exit_tb(DisasContext *ctx)
210
{
211
- return (ctx->tbflags & GUSA_EXCLUSIVE) != 0;
212
+ return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0;
213
}
214
215
static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
216
@@ -XXX,XX +XXX,XX @@ static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
217
TCGLabel *l1 = gen_new_label();
218
TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;
219
220
- if (ctx->tbflags & GUSA_EXCLUSIVE) {
221
+ if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
222
/* When in an exclusive region, we must continue to the end.
223
Therefore, exit the region on a taken branch, but otherwise
224
fall through to the next instruction. */
225
tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
226
- tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
227
+ tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
228
/* Note that this won't actually use a goto_tb opcode because we
229
disallow it in use_goto_tb, but it handles exit + singlestep. */
230
gen_goto_tb(ctx, 0, dest);
231
@@ -XXX,XX +XXX,XX @@ static void gen_delayed_conditional_jump(DisasContext * ctx)
232
tcg_gen_mov_i32(ds, cpu_delayed_cond);
233
tcg_gen_discard_i32(cpu_delayed_cond);
234
235
- if (ctx->tbflags & GUSA_EXCLUSIVE) {
236
+ if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
237
/* When in an exclusive region, we must continue to the end.
238
Therefore, exit the region on a taken branch, but otherwise
239
fall through to the next instruction. */
240
tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);
241
242
/* Leave the gUSA region. */
243
- tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
244
+ tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
245
gen_jump(ctx);
246
247
gen_set_label(l1);
248
@@ -XXX,XX +XXX,XX @@ static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
249
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
250
251
#define CHECK_NOT_DELAY_SLOT \
252
- if (ctx->envflags & DELAY_SLOT_MASK) { \
253
- goto do_illegal_slot; \
254
+ if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) { \
255
+ goto do_illegal_slot; \
256
}
257
258
#define CHECK_PRIVILEGED \
259
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
260
case 0x000b:        /* rts */
261
    CHECK_NOT_DELAY_SLOT
262
    tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
263
- ctx->envflags |= DELAY_SLOT;
264
+ ctx->envflags |= TB_FLAG_DELAY_SLOT;
265
    ctx->delayed_pc = (uint32_t) - 1;
266
    return;
267
case 0x0028:        /* clrmac */
268
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
269
    CHECK_NOT_DELAY_SLOT
270
gen_write_sr(cpu_ssr);
271
    tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
272
- ctx->envflags |= DELAY_SLOT_RTE;
273
+ ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE;
274
    ctx->delayed_pc = (uint32_t) - 1;
275
ctx->base.is_jmp = DISAS_STOP;
276
    return;
277
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
278
    return;
279
case 0xe000:        /* mov #imm,Rn */
280
#ifdef CONFIG_USER_ONLY
281
- /* Detect the start of a gUSA region. If so, update envflags
282
- and end the TB. This will allow us to see the end of the
283
- region (stored in R0) in the next TB. */
284
+ /*
285
+ * Detect the start of a gUSA region (mov #-n, r15).
286
+ * If so, update envflags and end the TB. This will allow us
287
+ * to see the end of the region (stored in R0) in the next TB.
288
+ */
289
if (B11_8 == 15 && B7_0s < 0 &&
290
(tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
291
- ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
292
+ ctx->envflags =
293
+ deposit32(ctx->envflags, TB_FLAG_GUSA_SHIFT, 8, B7_0s);
294
ctx->base.is_jmp = DISAS_STOP;
295
}
296
#endif
297
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
298
case 0xa000:        /* bra disp */
299
    CHECK_NOT_DELAY_SLOT
300
ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
301
- ctx->envflags |= DELAY_SLOT;
302
+ ctx->envflags |= TB_FLAG_DELAY_SLOT;
303
    return;
304
case 0xb000:        /* bsr disp */
305
    CHECK_NOT_DELAY_SLOT
306
tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
307
ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
308
- ctx->envflags |= DELAY_SLOT;
309
+ ctx->envflags |= TB_FLAG_DELAY_SLOT;
310
    return;
311
}
312
313
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
314
    CHECK_NOT_DELAY_SLOT
315
tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
316
ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
317
- ctx->envflags |= DELAY_SLOT_CONDITIONAL;
318
+ ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
319
    return;
320
case 0x8900:        /* bt label */
321
    CHECK_NOT_DELAY_SLOT
322
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
323
    CHECK_NOT_DELAY_SLOT
324
tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
325
ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
326
- ctx->envflags |= DELAY_SLOT_CONDITIONAL;
327
+ ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
328
    return;
329
case 0x8800:        /* cmp/eq #imm,R0 */
330
tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
331
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
332
case 0x0023:        /* braf Rn */
333
    CHECK_NOT_DELAY_SLOT
334
tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
335
- ctx->envflags |= DELAY_SLOT;
336
+ ctx->envflags |= TB_FLAG_DELAY_SLOT;
337
    ctx->delayed_pc = (uint32_t) - 1;
338
    return;
339
case 0x0003:        /* bsrf Rn */
340
    CHECK_NOT_DELAY_SLOT
341
tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
342
    tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
343
- ctx->envflags |= DELAY_SLOT;
344
+ ctx->envflags |= TB_FLAG_DELAY_SLOT;
345
    ctx->delayed_pc = (uint32_t) - 1;
346
    return;
347
case 0x4015:        /* cmp/pl Rn */
348
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
349
case 0x402b:        /* jmp @Rn */
350
    CHECK_NOT_DELAY_SLOT
351
    tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
352
- ctx->envflags |= DELAY_SLOT;
353
+ ctx->envflags |= TB_FLAG_DELAY_SLOT;
354
    ctx->delayed_pc = (uint32_t) - 1;
355
    return;
356
case 0x400b:        /* jsr @Rn */
357
    CHECK_NOT_DELAY_SLOT
358
tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
359
    tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
360
- ctx->envflags |= DELAY_SLOT;
361
+ ctx->envflags |= TB_FLAG_DELAY_SLOT;
362
    ctx->delayed_pc = (uint32_t) - 1;
363
    return;
364
case 0x400e:        /* ldc Rm,SR */
365
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
366
fflush(stderr);
367
#endif
368
do_illegal:
369
- if (ctx->envflags & DELAY_SLOT_MASK) {
370
+ if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
371
do_illegal_slot:
372
gen_save_cpu_state(ctx, true);
373
gen_helper_raise_slot_illegal_instruction(cpu_env);
374
@@ -XXX,XX +XXX,XX @@ static void _decode_opc(DisasContext * ctx)
375
376
do_fpu_disabled:
377
gen_save_cpu_state(ctx, true);
378
- if (ctx->envflags & DELAY_SLOT_MASK) {
379
+ if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
380
gen_helper_raise_slot_fpu_disable(cpu_env);
381
} else {
382
gen_helper_raise_fpu_disable(cpu_env);
383
@@ -XXX,XX +XXX,XX @@ static void decode_opc(DisasContext * ctx)
384
385
_decode_opc(ctx);
386
387
- if (old_flags & DELAY_SLOT_MASK) {
388
+ if (old_flags & TB_FLAG_DELAY_SLOT_MASK) {
389
/* go out of the delay slot */
390
- ctx->envflags &= ~DELAY_SLOT_MASK;
391
+ ctx->envflags &= ~TB_FLAG_DELAY_SLOT_MASK;
392
393
/* When in an exclusive region, we must continue to the end
394
for conditional branches. */
395
- if (ctx->tbflags & GUSA_EXCLUSIVE
396
- && old_flags & DELAY_SLOT_CONDITIONAL) {
397
+ if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE
398
+ && old_flags & TB_FLAG_DELAY_SLOT_COND) {
399
gen_delayed_conditional_jump(ctx);
400
return;
401
}
402
/* Otherwise this is probably an invalid gUSA region.
403
Drop the GUSA bits so the next TB doesn't see them. */
404
- ctx->envflags &= ~GUSA_MASK;
405
+ ctx->envflags &= ~TB_FLAG_GUSA_MASK;
406
407
tcg_gen_movi_i32(cpu_flags, ctx->envflags);
408
- if (old_flags & DELAY_SLOT_CONDITIONAL) {
409
+ if (old_flags & TB_FLAG_DELAY_SLOT_COND) {
410
     gen_delayed_conditional_jump(ctx);
411
} else {
412
gen_jump(ctx);
413
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
414
}
415
416
/* The entire region has been translated. */
417
- ctx->envflags &= ~GUSA_MASK;
418
+ ctx->envflags &= ~TB_FLAG_GUSA_MASK;
419
ctx->base.pc_next = pc_end;
420
ctx->base.num_insns += max_insns - 1;
421
return;
422
@@ -XXX,XX +XXX,XX @@ static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
423
424
/* Restart with the EXCLUSIVE bit set, within a TB run via
425
cpu_exec_step_atomic holding the exclusive lock. */
426
- ctx->envflags |= GUSA_EXCLUSIVE;
427
+ ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE;
428
gen_save_cpu_state(ctx, false);
429
gen_helper_exclusive(cpu_env);
430
ctx->base.is_jmp = DISAS_NORETURN;
431
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
432
(tbflags & (1 << SR_RB))) * 0x10;
433
ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
434
435
- if (tbflags & GUSA_MASK) {
436
+#ifdef CONFIG_USER_ONLY
437
+ if (tbflags & TB_FLAG_GUSA_MASK) {
438
+ /* In gUSA exclusive region. */
439
uint32_t pc = ctx->base.pc_next;
440
uint32_t pc_end = ctx->base.tb->cs_base;
441
- int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
442
+ int backup = sextract32(ctx->tbflags, TB_FLAG_GUSA_SHIFT, 8);
443
int max_insns = (pc_end - pc) / 2;
444
445
if (pc != pc_end + backup || max_insns < 2) {
446
/* This is a malformed gUSA region. Don't do anything special,
447
since the interpreter is likely to get confused. */
448
- ctx->envflags &= ~GUSA_MASK;
449
- } else if (tbflags & GUSA_EXCLUSIVE) {
450
+ ctx->envflags &= ~TB_FLAG_GUSA_MASK;
451
+ } else if (tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
452
/* Regardless of single-stepping or the end of the page,
453
we must complete execution of the gUSA region while
454
holding the exclusive lock. */
455
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
456
return;
457
}
458
}
459
+#endif
460
461
/* Since the ISA is fixed-width, we can bound by the number
462
of instructions remaining on the page. */
463
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
464
DisasContext *ctx = container_of(dcbase, DisasContext, base);
465
466
#ifdef CONFIG_USER_ONLY
467
- if (unlikely(ctx->envflags & GUSA_MASK)
468
- && !(ctx->envflags & GUSA_EXCLUSIVE)) {
469
+ if (unlikely(ctx->envflags & TB_FLAG_GUSA_MASK)
470
+ && !(ctx->envflags & TB_FLAG_GUSA_EXCLUSIVE)) {
471
/* We're in an gUSA region, and we have not already fallen
472
back on using an exclusive region. Attempt to parse the
473
region into a single supported atomic operation. Failure
474
@@ -XXX,XX +XXX,XX @@ static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
475
{
476
DisasContext *ctx = container_of(dcbase, DisasContext, base);
477
478
- if (ctx->tbflags & GUSA_EXCLUSIVE) {
479
+ if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
480
/* Ending the region of exclusivity. Clear the bits. */
481
- ctx->envflags &= ~GUSA_MASK;
482
+ ctx->envflags &= ~TB_FLAG_GUSA_MASK;
483
}
484
485
switch (ctx->base.is_jmp) {
486
--
498
--
487
2.34.1
499
2.43.0
500
501
diff view generated by jsdifflib
New patch
1
Acked-by: Alistair Francis <alistair.francis@wdc.com>
2
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
3
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Message-ID: <20250102181601.1421059-2-richard.henderson@linaro.org>
5
---
6
host/include/riscv/host/cpuinfo.h | 5 +++--
7
util/cpuinfo-riscv.c | 18 ++++++++++++++++--
8
2 files changed, 19 insertions(+), 4 deletions(-)
1
9
10
diff --git a/host/include/riscv/host/cpuinfo.h b/host/include/riscv/host/cpuinfo.h
11
index XXXXXXX..XXXXXXX 100644
12
--- a/host/include/riscv/host/cpuinfo.h
13
+++ b/host/include/riscv/host/cpuinfo.h
14
@@ -XXX,XX +XXX,XX @@
15
#define CPUINFO_ALWAYS (1u << 0) /* so cpuinfo is nonzero */
16
#define CPUINFO_ZBA (1u << 1)
17
#define CPUINFO_ZBB (1u << 2)
18
-#define CPUINFO_ZICOND (1u << 3)
19
-#define CPUINFO_ZVE64X (1u << 4)
20
+#define CPUINFO_ZBS (1u << 3)
21
+#define CPUINFO_ZICOND (1u << 4)
22
+#define CPUINFO_ZVE64X (1u << 5)
23
24
/* Initialized with a constructor. */
25
extern unsigned cpuinfo;
26
diff --git a/util/cpuinfo-riscv.c b/util/cpuinfo-riscv.c
27
index XXXXXXX..XXXXXXX 100644
28
--- a/util/cpuinfo-riscv.c
29
+++ b/util/cpuinfo-riscv.c
30
@@ -XXX,XX +XXX,XX @@ static void sigill_handler(int signo, siginfo_t *si, void *data)
31
/* Called both as constructor and (possibly) via other constructors. */
32
unsigned __attribute__((constructor)) cpuinfo_init(void)
33
{
34
- unsigned left = CPUINFO_ZBA | CPUINFO_ZBB | CPUINFO_ZICOND | CPUINFO_ZVE64X;
35
+ unsigned left = CPUINFO_ZBA | CPUINFO_ZBB | CPUINFO_ZBS
36
+ | CPUINFO_ZICOND | CPUINFO_ZVE64X;
37
unsigned info = cpuinfo;
38
39
if (info) {
40
@@ -XXX,XX +XXX,XX @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
41
#if defined(__riscv_arch_test) && defined(__riscv_zbb)
42
info |= CPUINFO_ZBB;
43
#endif
44
+#if defined(__riscv_arch_test) && defined(__riscv_zbs)
45
+ info |= CPUINFO_ZBS;
46
+#endif
47
#if defined(__riscv_arch_test) && defined(__riscv_zicond)
48
info |= CPUINFO_ZICOND;
49
#endif
50
@@ -XXX,XX +XXX,XX @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
51
&& pair.key >= 0) {
52
info |= pair.value & RISCV_HWPROBE_EXT_ZBA ? CPUINFO_ZBA : 0;
53
info |= pair.value & RISCV_HWPROBE_EXT_ZBB ? CPUINFO_ZBB : 0;
54
- left &= ~(CPUINFO_ZBA | CPUINFO_ZBB);
55
+ info |= pair.value & RISCV_HWPROBE_EXT_ZBS ? CPUINFO_ZBS : 0;
56
+ left &= ~(CPUINFO_ZBA | CPUINFO_ZBB | CPUINFO_ZBS);
57
#ifdef RISCV_HWPROBE_EXT_ZICOND
58
info |= pair.value & RISCV_HWPROBE_EXT_ZICOND ? CPUINFO_ZICOND : 0;
59
left &= ~CPUINFO_ZICOND;
60
@@ -XXX,XX +XXX,XX @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
61
left &= ~CPUINFO_ZBB;
62
}
63
64
+ if (left & CPUINFO_ZBS) {
65
+ /* Probe for Zbs: bext zero,zero,zero. */
66
+ got_sigill = 0;
67
+ asm volatile(".insn r 0x33, 5, 0x24, zero, zero, zero"
68
+ : : : "memory");
69
+ info |= got_sigill ? 0 : CPUINFO_ZBS;
70
+ left &= ~CPUINFO_ZBS;
71
+ }
72
+
73
if (left & CPUINFO_ZICOND) {
74
/* Probe for Zicond: czero.eqz zero,zero,zero. */
75
got_sigill = 0;
76
--
77
2.43.0
78
79
diff view generated by jsdifflib
New patch
1
Acked-by: Alistair Francis <alistair.francis@wdc.com>
2
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
3
Message-ID: <20250102181601.1421059-3-richard.henderson@linaro.org>
4
---
5
tcg/riscv/tcg-target-has.h | 8 +++++++-
6
tcg/riscv/tcg-target.c.inc | 11 +++++++++--
7
2 files changed, 16 insertions(+), 3 deletions(-)
1
8
9
diff --git a/tcg/riscv/tcg-target-has.h b/tcg/riscv/tcg-target-has.h
10
index XXXXXXX..XXXXXXX 100644
11
--- a/tcg/riscv/tcg-target-has.h
12
+++ b/tcg/riscv/tcg-target-has.h
13
@@ -XXX,XX +XXX,XX @@ tcg_target_extract_valid(TCGType type, unsigned ofs, unsigned len)
14
/* ofs > 0 uses SRLIW; ofs == 0 uses add.uw. */
15
return ofs || (cpuinfo & CPUINFO_ZBA);
16
}
17
- return (cpuinfo & CPUINFO_ZBB) && ofs == 0 && len == 16;
18
+ switch (len) {
19
+ case 1:
20
+ return (cpuinfo & CPUINFO_ZBS) && ofs != 0;
21
+ case 16:
22
+ return (cpuinfo & CPUINFO_ZBB) && ofs == 0;
23
+ }
24
+ return false;
25
}
26
#define TCG_TARGET_extract_valid tcg_target_extract_valid
27
28
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
29
index XXXXXXX..XXXXXXX 100644
30
--- a/tcg/riscv/tcg-target.c.inc
31
+++ b/tcg/riscv/tcg-target.c.inc
32
@@ -XXX,XX +XXX,XX @@ typedef enum {
33
OPC_ANDI = 0x7013,
34
OPC_AUIPC = 0x17,
35
OPC_BEQ = 0x63,
36
+ OPC_BEXTI = 0x48005013,
37
OPC_BGE = 0x5063,
38
OPC_BGEU = 0x7063,
39
OPC_BLT = 0x4063,
40
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
41
}
42
/* FALLTHRU */
43
case INDEX_op_extract_i32:
44
- if (a2 == 0 && args[3] == 16) {
45
+ switch (args[3]) {
46
+ case 1:
47
+ tcg_out_opc_imm(s, OPC_BEXTI, a0, a1, a2);
48
+ break;
49
+ case 16:
50
+ tcg_debug_assert(a2 == 0);
51
tcg_out_ext16u(s, a0, a1);
52
- } else {
53
+ break;
54
+ default:
55
g_assert_not_reached();
56
}
57
break;
58
--
59
2.43.0
diff view generated by jsdifflib
New patch
1
From: Helge Deller <deller@kernel.org>
1
2
3
Add some missing fields which may be parsed by userspace applications.
4
5
Signed-off-by: Helge Deller <deller@gmx.de>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-ID: <Z39B1wzNNpndmOxZ@p100>
9
---
10
linux-user/sparc/target_proc.h | 20 +++++++++++++++++++-
11
1 file changed, 19 insertions(+), 1 deletion(-)
12
13
diff --git a/linux-user/sparc/target_proc.h b/linux-user/sparc/target_proc.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/linux-user/sparc/target_proc.h
16
+++ b/linux-user/sparc/target_proc.h
17
@@ -XXX,XX +XXX,XX @@
18
19
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
20
{
21
- dprintf(fd, "type\t\t: sun4u\n");
22
+ int i, num_cpus;
23
+ const char *cpu_type;
24
+
25
+ num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
26
+ if (cpu_env->def.features & CPU_FEATURE_HYPV) {
27
+ cpu_type = "sun4v";
28
+ } else {
29
+ cpu_type = "sun4u";
30
+ }
31
+
32
+ dprintf(fd, "cpu\t\t: %s (QEMU)\n", cpu_env->def.name);
33
+ dprintf(fd, "type\t\t: %s\n", cpu_type);
34
+ dprintf(fd, "ncpus probed\t: %d\n", num_cpus);
35
+ dprintf(fd, "ncpus active\t: %d\n", num_cpus);
36
+ dprintf(fd, "State:\n");
37
+ for (i = 0; i < num_cpus; i++) {
38
+ dprintf(fd, "CPU%d:\t\t: online\n", i);
39
+ }
40
+
41
return 0;
42
}
43
#define HAVE_ARCH_PROC_CPUINFO
44
--
45
2.43.0
diff view generated by jsdifflib
1
Let tb->page_addr[0] contain the address of the first byte of the
1
From: Ilya Leoshkevich <iii@linux.ibm.com>
2
translated block, rather than the address of the page containing the
3
start of the translated block. We need to recover this value anyway
4
at various points, and it is easier to discard a page offset when it
5
is not needed, which happens naturally via the existing find_page shift.
6
2
3
These similarly named functions serve different purposes; add
4
docstrings to highlight them.
5
6
Suggested-by: Alex Bennée <alex.bennee@linaro.org>
7
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
7
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
8
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Message-ID: <20250116213214.5695-1-iii@linux.ibm.com>
9
---
12
---
10
accel/tcg/cpu-exec.c | 16 ++++++++--------
13
include/tcg/tcg.h | 41 +++++++++++++++++++++++++++++++++++++++++
11
accel/tcg/cputlb.c | 3 ++-
14
accel/tcg/cpu-exec.c | 15 ++++++++++++++-
12
accel/tcg/translate-all.c | 9 +++++----
15
2 files changed, 55 insertions(+), 1 deletion(-)
13
3 files changed, 15 insertions(+), 13 deletions(-)
14
16
17
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/include/tcg/tcg.h
20
+++ b/include/tcg/tcg.h
21
@@ -XXX,XX +XXX,XX @@ void tcg_region_reset_all(void);
22
size_t tcg_code_size(void);
23
size_t tcg_code_capacity(void);
24
25
+/**
26
+ * tcg_tb_insert:
27
+ * @tb: translation block to insert
28
+ *
29
+ * Insert @tb into the region trees.
30
+ */
31
void tcg_tb_insert(TranslationBlock *tb);
32
+
33
+/**
34
+ * tcg_tb_remove:
35
+ * @tb: translation block to remove
36
+ *
37
+ * Remove @tb from the region trees.
38
+ */
39
void tcg_tb_remove(TranslationBlock *tb);
40
+
41
+/**
42
+ * tcg_tb_lookup:
43
+ * @tc_ptr: host PC to look up
44
+ *
45
+ * Look up a translation block inside the region trees by @tc_ptr. This is
46
+ * useful for exception handling, but must not be used for the purposes of
47
+ * executing the returned translation block. See struct tb_tc for more
48
+ * information.
49
+ *
50
+ * Returns: a translation block previously inserted into the region trees,
51
+ * such that @tc_ptr points anywhere inside the code generated for it, or
52
+ * NULL.
53
+ */
54
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr);
55
+
56
+/**
57
+ * tcg_tb_foreach:
58
+ * @func: callback
59
+ * @user_data: opaque value to pass to @callback
60
+ *
61
+ * Call @func for each translation block inserted into the region trees.
62
+ */
63
void tcg_tb_foreach(GTraverseFunc func, gpointer user_data);
64
+
65
+/**
66
+ * tcg_nb_tbs:
67
+ *
68
+ * Returns: the number of translation blocks inserted into the region trees.
69
+ */
70
size_t tcg_nb_tbs(void);
71
72
/* user-mode: Called with mmap_lock held. */
15
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
73
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
16
index XXXXXXX..XXXXXXX 100644
74
index XXXXXXX..XXXXXXX 100644
17
--- a/accel/tcg/cpu-exec.c
75
--- a/accel/tcg/cpu-exec.c
18
+++ b/accel/tcg/cpu-exec.c
76
+++ b/accel/tcg/cpu-exec.c
19
@@ -XXX,XX +XXX,XX @@ struct tb_desc {
77
@@ -XXX,XX +XXX,XX @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
20
target_ulong pc;
21
target_ulong cs_base;
22
CPUArchState *env;
23
- tb_page_addr_t phys_page1;
24
+ tb_page_addr_t page_addr0;
25
uint32_t flags;
26
uint32_t cflags;
27
uint32_t trace_vcpu_dstate;
28
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
29
const struct tb_desc *desc = d;
30
31
if (tb->pc == desc->pc &&
32
- tb->page_addr[0] == desc->phys_page1 &&
33
+ tb->page_addr[0] == desc->page_addr0 &&
34
tb->cs_base == desc->cs_base &&
35
tb->flags == desc->flags &&
36
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
37
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
38
if (tb->page_addr[1] == -1) {
39
return true;
40
} else {
41
- tb_page_addr_t phys_page2;
42
- target_ulong virt_page2;
43
+ tb_page_addr_t phys_page1;
44
+ target_ulong virt_page1;
45
46
/*
47
* We know that the first page matched, and an otherwise valid TB
48
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
49
* is different for the new TB. Therefore any exception raised
50
* here by the faulting lookup is not premature.
51
*/
52
- virt_page2 = TARGET_PAGE_ALIGN(desc->pc);
53
- phys_page2 = get_page_addr_code(desc->env, virt_page2);
54
- if (tb->page_addr[1] == phys_page2) {
55
+ virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
56
+ phys_page1 = get_page_addr_code(desc->env, virt_page1);
57
+ if (tb->page_addr[1] == phys_page1) {
58
return true;
59
}
60
}
61
@@ -XXX,XX +XXX,XX @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
62
if (phys_pc == -1) {
63
return NULL;
64
}
65
- desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
66
+ desc.page_addr0 = phys_pc;
67
h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
68
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
78
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
69
}
79
}
70
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
80
71
index XXXXXXX..XXXXXXX 100644
81
-/* Might cause an exception, so have a longjmp destination ready */
72
--- a/accel/tcg/cputlb.c
82
+/**
73
+++ b/accel/tcg/cputlb.c
83
+ * tb_lookup:
74
@@ -XXX,XX +XXX,XX @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
84
+ * @cpu: CPU that will execute the returned translation block
75
can be detected */
85
+ * @pc: guest PC
76
void tlb_protect_code(ram_addr_t ram_addr)
86
+ * @cs_base: arch-specific value associated with translation block
77
{
87
+ * @flags: arch-specific translation block flags
78
- cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
88
+ * @cflags: CF_* flags
79
+ cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
89
+ *
80
+ TARGET_PAGE_SIZE,
90
+ * Look up a translation block inside the QHT using @pc, @cs_base, @flags and
81
DIRTY_MEMORY_CODE);
91
+ * @cflags. Uses @cpu's tb_jmp_cache. Might cause an exception, so have a
82
}
92
+ * longjmp destination ready.
83
93
+ *
84
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
94
+ * Returns: an existing translation block or NULL.
85
index XXXXXXX..XXXXXXX 100644
95
+ */
86
--- a/accel/tcg/translate-all.c
96
static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
87
+++ b/accel/tcg/translate-all.c
97
uint64_t cs_base, uint32_t flags,
88
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
98
uint32_t cflags)
89
qemu_spin_unlock(&tb->jmp_lock);
90
91
/* remove the TB from the hash list */
92
- phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
93
+ phys_pc = tb->page_addr[0];
94
h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
95
tb->trace_vcpu_dstate);
96
if (!qht_remove(&tb_ctx.htable, tb, h)) {
97
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
98
* we can only insert TBs that are fully initialized.
99
*/
100
page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
101
- tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
102
+ tb_page_add(p, tb, 0, phys_pc);
103
if (p2) {
104
tb_page_add(p2, tb, 1, phys_page2);
105
} else {
106
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
107
if (n == 0) {
108
/* NOTE: tb_end may be after the end of the page, but
109
it is not a problem */
110
- tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
111
+ tb_start = tb->page_addr[0];
112
tb_end = tb_start + tb->size;
113
} else {
114
tb_start = tb->page_addr[1];
115
- tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
116
+ tb_end = tb_start + ((tb->page_addr[0] + tb->size)
117
+ & ~TARGET_PAGE_MASK);
118
}
119
if (!(tb_end <= start || tb_start >= end)) {
120
#ifdef TARGET_HAS_PRECISE_SMC
121
--
99
--
122
2.34.1
100
2.43.0
123
101
124
102
diff view generated by jsdifflib
1
This bitmap is created and discarded immediately.
1
From: Ilya Leoshkevich <iii@linux.ibm.com>
2
We gain nothing by its existence.
3
2
3
Currently one-insn TBs created from I/O memory are not added to
4
region_trees. Therefore, when they generate exceptions, they are not
5
handled by cpu_restore_state_from_tb().
6
7
For x86 this is not a problem, because x86_restore_state_to_opc() only
8
restores pc and cc, which already have the correct values if the first
9
TB instruction causes an exception. However, on several other
10
architectures, restore_state_to_opc() is not stricly limited to state
11
restoration and affects some exception-related registers, where guests
12
can notice incorrect values, for example:
13
14
- arm's exception.syndrome;
15
- hppa's unwind_breg;
16
- riscv's excp_uw2;
17
- s390x's int_pgm_ilen.
18
19
Fix by always calling tcg_tb_insert(). This may increase the size of
20
region_trees, but tcg_region_reset_all() clears it once code_gen_buffer
21
fills up, so it will not grow uncontrollably.
22
23
Do not call tb_link_page(), which would add such TBs to the QHT, to
24
prevent tb_lookup() from finding them. These TBs are single-use, since
25
subsequent reads from I/O memory may return different values; they are
26
not removed from code_gen_buffer only in order to keep things simple.
27
28
Co-developed-by: Nina Schoetterl-Glausch <nsg@linux.ibm.com>
29
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
30
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
4
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
31
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
32
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-Id: <20220822232338.1727934-2-richard.henderson@linaro.org>
33
Message-ID: <20250116213214.5695-2-iii@linux.ibm.com>
7
---
34
---
8
accel/tcg/translate-all.c | 78 ++-------------------------------------
35
accel/tcg/translate-all.c | 29 +++++++++++++++++++----------
9
1 file changed, 4 insertions(+), 74 deletions(-)
36
1 file changed, 19 insertions(+), 10 deletions(-)
10
37
11
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
38
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
12
index XXXXXXX..XXXXXXX 100644
39
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/translate-all.c
40
--- a/accel/tcg/translate-all.c
14
+++ b/accel/tcg/translate-all.c
41
+++ b/accel/tcg/translate-all.c
15
@@ -XXX,XX +XXX,XX @@
42
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
16
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
43
tb_reset_jump(tb, 1);
17
#endif
44
}
18
45
19
-#define SMC_BITMAP_USE_THRESHOLD 10
46
- /*
47
- * If the TB is not associated with a physical RAM page then it must be
48
- * a temporary one-insn TB, and we have nothing left to do. Return early
49
- * before attempting to link to other TBs or add to the lookup table.
50
- */
51
- if (tb_page_addr0(tb) == -1) {
52
- assert_no_pages_locked();
53
- return tb;
54
- }
20
-
55
-
21
typedef struct PageDesc {
56
/*
22
/* list of TBs intersecting this ram page */
57
* Insert TB into the corresponding region tree before publishing it
23
uintptr_t first_tb;
58
* through QHT. Otherwise rewinding happened in the TB might fail to
24
-#ifdef CONFIG_SOFTMMU
59
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
25
- /* in order to optimize self modifying code, we count the number
60
*/
26
- of lookups we do to a given page to use a bitmap */
61
tcg_tb_insert(tb);
27
- unsigned long *code_bitmap;
62
28
- unsigned int code_write_count;
63
+ /*
29
-#else
64
+ * If the TB is not associated with a physical RAM page then it must be
30
+#ifdef CONFIG_USER_ONLY
65
+ * a temporary one-insn TB.
31
unsigned long flags;
66
+ *
32
void *target_data;
67
+ * Such TBs must be added to region trees in order to make sure that
33
#endif
68
+ * restore_state_to_opc() - which on some architectures is not limited to
34
-#ifndef CONFIG_USER_ONLY
69
+ * rewinding, but also affects exception handling! - is called when such a
35
+#ifdef CONFIG_SOFTMMU
70
+ * TB causes an exception.
36
QemuSpin lock;
71
+ *
37
#endif
72
+ * At the same time, temporary one-insn TBs must be executed at most once,
38
} PageDesc;
73
+ * because subsequent reads from, e.g., I/O memory may return different
39
@@ -XXX,XX +XXX,XX @@ void tb_htable_init(void)
74
+ * values. So return early before attempting to link to other TBs or add
40
qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
75
+ * to the QHT.
41
}
76
+ */
42
77
+ if (tb_page_addr0(tb) == -1) {
43
-/* call with @p->lock held */
78
+ assert_no_pages_locked();
44
-static inline void invalidate_page_bitmap(PageDesc *p)
79
+ return tb;
45
-{
80
+ }
46
- assert_page_locked(p);
81
+
47
-#ifdef CONFIG_SOFTMMU
82
/*
48
- g_free(p->code_bitmap);
83
* No explicit memory barrier is required -- tb_link_page() makes the
49
- p->code_bitmap = NULL;
84
* TB visible in a consistent state.
50
- p->code_write_count = 0;
51
-#endif
52
-}
53
-
54
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
55
static void page_flush_tb_1(int level, void **lp)
56
{
57
@@ -XXX,XX +XXX,XX @@ static void page_flush_tb_1(int level, void **lp)
58
for (i = 0; i < V_L2_SIZE; ++i) {
59
page_lock(&pd[i]);
60
pd[i].first_tb = (uintptr_t)NULL;
61
- invalidate_page_bitmap(pd + i);
62
page_unlock(&pd[i]);
63
}
64
} else {
65
@@ -XXX,XX +XXX,XX @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
66
if (rm_from_page_list) {
67
p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
68
tb_page_remove(p, tb);
69
- invalidate_page_bitmap(p);
70
if (tb->page_addr[1] != -1) {
71
p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
72
tb_page_remove(p, tb);
73
- invalidate_page_bitmap(p);
74
}
75
}
76
77
@@ -XXX,XX +XXX,XX @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
78
}
79
}
80
81
-#ifdef CONFIG_SOFTMMU
82
-/* call with @p->lock held */
83
-static void build_page_bitmap(PageDesc *p)
84
-{
85
- int n, tb_start, tb_end;
86
- TranslationBlock *tb;
87
-
88
- assert_page_locked(p);
89
- p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);
90
-
91
- PAGE_FOR_EACH_TB(p, tb, n) {
92
- /* NOTE: this is subtle as a TB may span two physical pages */
93
- if (n == 0) {
94
- /* NOTE: tb_end may be after the end of the page, but
95
- it is not a problem */
96
- tb_start = tb->pc & ~TARGET_PAGE_MASK;
97
- tb_end = tb_start + tb->size;
98
- if (tb_end > TARGET_PAGE_SIZE) {
99
- tb_end = TARGET_PAGE_SIZE;
100
- }
101
- } else {
102
- tb_start = 0;
103
- tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
104
- }
105
- bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
106
- }
107
-}
108
-#endif
109
-
110
/* add the tb in the target page and protect it if necessary
111
*
112
* Called with mmap_lock held for user-mode emulation.
113
@@ -XXX,XX +XXX,XX @@ static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
114
page_already_protected = p->first_tb != (uintptr_t)NULL;
115
#endif
116
p->first_tb = (uintptr_t)tb | n;
117
- invalidate_page_bitmap(p);
118
119
#if defined(CONFIG_USER_ONLY)
120
/* translator_loop() must have made all TB pages non-writable */
121
@@ -XXX,XX +XXX,XX @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
122
/* remove TB from the page(s) if we couldn't insert it */
123
if (unlikely(existing_tb)) {
124
tb_page_remove(p, tb);
125
- invalidate_page_bitmap(p);
126
if (p2) {
127
tb_page_remove(p2, tb);
128
- invalidate_page_bitmap(p2);
129
}
130
tb = existing_tb;
131
}
132
@@ -XXX,XX +XXX,XX @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
133
#if !defined(CONFIG_USER_ONLY)
134
/* if no code remaining, no need to continue to use slow writes */
135
if (!p->first_tb) {
136
- invalidate_page_bitmap(p);
137
tlb_unprotect_code(start);
138
}
139
#endif
140
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_page_fast(struct page_collection *pages,
141
}
142
143
assert_page_locked(p);
144
- if (!p->code_bitmap &&
145
- ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
146
- build_page_bitmap(p);
147
- }
148
- if (p->code_bitmap) {
149
- unsigned int nr;
150
- unsigned long b;
151
-
152
- nr = start & ~TARGET_PAGE_MASK;
153
- b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
154
- if (b & ((1 << len) - 1)) {
155
- goto do_invalidate;
156
- }
157
- } else {
158
- do_invalidate:
159
- tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
160
- retaddr);
161
- }
162
+ tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
163
+ retaddr);
164
}
165
#else
166
/* Called with mmap_lock held. If pc is not 0 then it indicates the
167
--
85
--
168
2.34.1
86
2.43.0
169
87
170
88
diff view generated by jsdifflib
1
Populate this new method for all targets. Always match
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
2
the result that would be given by cpu_get_tb_cpu_state,
3
as we will want these values to correspond in the logs.
4
2
5
Reviewed-by: Taylor Simpson <tsimpson@quicinc.com>
3
These helpers don't alter float_status. Make it const.
6
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
4
7
Reviewed-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> (target/sparc)
5
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
6
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
Message-ID: <20250116214359.67295-1-philmd@linaro.org>
9
---
9
---
10
Cc: Eduardo Habkost <eduardo@habkost.net> (supporter:Machine core)
10
include/fpu/softfloat-helpers.h | 25 ++++++++++++++-----------
11
Cc: Marcel Apfelbaum <marcel.apfelbaum@gmail.com> (supporter:Machine core)
11
1 file changed, 14 insertions(+), 11 deletions(-)
12
Cc: "Philippe Mathieu-Daudé" <f4bug@amsat.org> (reviewer:Machine core)
13
Cc: Yanan Wang <wangyanan55@huawei.com> (reviewer:Machine core)
14
Cc: Michael Rolnik <mrolnik@gmail.com> (maintainer:AVR TCG CPUs)
15
Cc: "Edgar E. Iglesias" <edgar.iglesias@gmail.com> (maintainer:CRIS TCG CPUs)
16
Cc: Taylor Simpson <tsimpson@quicinc.com> (supporter:Hexagon TCG CPUs)
17
Cc: Song Gao <gaosong@loongson.cn> (maintainer:LoongArch TCG CPUs)
18
Cc: Xiaojuan Yang <yangxiaojuan@loongson.cn> (maintainer:LoongArch TCG CPUs)
19
Cc: Laurent Vivier <laurent@vivier.eu> (maintainer:M68K TCG CPUs)
20
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com> (reviewer:MIPS TCG CPUs)
21
Cc: Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> (reviewer:MIPS TCG CPUs)
22
Cc: Chris Wulff <crwulff@gmail.com> (maintainer:NiosII TCG CPUs)
23
Cc: Marek Vasut <marex@denx.de> (maintainer:NiosII TCG CPUs)
24
Cc: Stafford Horne <shorne@gmail.com> (odd fixer:OpenRISC TCG CPUs)
25
Cc: Yoshinori Sato <ysato@users.sourceforge.jp> (reviewer:RENESAS RX CPUs)
26
Cc: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> (maintainer:SPARC TCG CPUs)
27
Cc: Bastian Koppelmann <kbastian@mail.uni-paderborn.de> (maintainer:TriCore TCG CPUs)
28
Cc: Max Filippov <jcmvbkbc@gmail.com> (maintainer:Xtensa TCG CPUs)
29
Cc: qemu-arm@nongnu.org (open list:ARM TCG CPUs)
30
Cc: qemu-ppc@nongnu.org (open list:PowerPC TCG CPUs)
31
Cc: qemu-riscv@nongnu.org (open list:RISC-V TCG CPUs)
32
Cc: qemu-s390x@nongnu.org (open list:S390 TCG CPUs)
33
---
34
include/hw/core/cpu.h | 3 +++
35
target/alpha/cpu.c | 9 +++++++++
36
target/arm/cpu.c | 13 +++++++++++++
37
target/avr/cpu.c | 8 ++++++++
38
target/cris/cpu.c | 8 ++++++++
39
target/hexagon/cpu.c | 8 ++++++++
40
target/hppa/cpu.c | 8 ++++++++
41
target/i386/cpu.c | 9 +++++++++
42
target/loongarch/cpu.c | 9 +++++++++
43
target/m68k/cpu.c | 8 ++++++++
44
target/microblaze/cpu.c | 8 ++++++++
45
target/mips/cpu.c | 8 ++++++++
46
target/nios2/cpu.c | 9 +++++++++
47
target/openrisc/cpu.c | 8 ++++++++
48
target/ppc/cpu_init.c | 8 ++++++++
49
target/riscv/cpu.c | 13 +++++++++++++
50
target/rx/cpu.c | 8 ++++++++
51
target/s390x/cpu.c | 8 ++++++++
52
target/sh4/cpu.c | 8 ++++++++
53
target/sparc/cpu.c | 8 ++++++++
54
target/tricore/cpu.c | 9 +++++++++
55
target/xtensa/cpu.c | 8 ++++++++
56
22 files changed, 186 insertions(+)
57
12
58
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
13
diff --git a/include/fpu/softfloat-helpers.h b/include/fpu/softfloat-helpers.h
59
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
60
--- a/include/hw/core/cpu.h
15
--- a/include/fpu/softfloat-helpers.h
61
+++ b/include/hw/core/cpu.h
16
+++ b/include/fpu/softfloat-helpers.h
62
@@ -XXX,XX +XXX,XX @@ struct SysemuCPUOps;
17
@@ -XXX,XX +XXX,XX @@ static inline void set_no_signaling_nans(bool val, float_status *status)
63
* If the target behaviour here is anything other than "set
18
status->no_signaling_nans = val;
64
* the PC register to the value passed in" then the target must
65
* also implement the synchronize_from_tb hook.
66
+ * @get_pc: Callback for getting the Program Counter register.
67
+ * As above, with the semantics of the target architecture.
68
* @gdb_read_register: Callback for letting GDB read a register.
69
* @gdb_write_register: Callback for letting GDB write a register.
70
* @gdb_adjust_breakpoint: Callback for adjusting the address of a
71
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
72
void (*dump_state)(CPUState *cpu, FILE *, int flags);
73
int64_t (*get_arch_id)(CPUState *cpu);
74
void (*set_pc)(CPUState *cpu, vaddr value);
75
+ vaddr (*get_pc)(CPUState *cpu);
76
int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
77
int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
78
vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr);
79
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
80
index XXXXXXX..XXXXXXX 100644
81
--- a/target/alpha/cpu.c
82
+++ b/target/alpha/cpu.c
83
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_set_pc(CPUState *cs, vaddr value)
84
cpu->env.pc = value;
85
}
19
}
86
20
87
+static vaddr alpha_cpu_get_pc(CPUState *cs)
21
-static inline bool get_float_detect_tininess(float_status *status)
88
+{
22
+static inline bool get_float_detect_tininess(const float_status *status)
89
+ AlphaCPU *cpu = ALPHA_CPU(cs);
90
+
91
+ return cpu->env.pc;
92
+}
93
+
94
+
95
static bool alpha_cpu_has_work(CPUState *cs)
96
{
23
{
97
/* Here we are checking to see if the CPU should wake up from HALT.
24
return status->tininess_before_rounding;
98
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
99
cc->has_work = alpha_cpu_has_work;
100
cc->dump_state = alpha_cpu_dump_state;
101
cc->set_pc = alpha_cpu_set_pc;
102
+ cc->get_pc = alpha_cpu_get_pc;
103
cc->gdb_read_register = alpha_cpu_gdb_read_register;
104
cc->gdb_write_register = alpha_cpu_gdb_write_register;
105
#ifndef CONFIG_USER_ONLY
106
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
107
index XXXXXXX..XXXXXXX 100644
108
--- a/target/arm/cpu.c
109
+++ b/target/arm/cpu.c
110
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_set_pc(CPUState *cs, vaddr value)
111
}
112
}
25
}
113
26
114
+static vaddr arm_cpu_get_pc(CPUState *cs)
27
-static inline FloatRoundMode get_float_rounding_mode(float_status *status)
115
+{
28
+static inline FloatRoundMode get_float_rounding_mode(const float_status *status)
116
+ ARMCPU *cpu = ARM_CPU(cs);
29
{
117
+ CPUARMState *env = &cpu->env;
30
return status->float_rounding_mode;
118
+
119
+ if (is_a64(env)) {
120
+ return env->pc;
121
+ } else {
122
+ return env->regs[15];
123
+ }
124
+}
125
+
126
#ifdef CONFIG_TCG
127
void arm_cpu_synchronize_from_tb(CPUState *cs,
128
const TranslationBlock *tb)
129
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
130
cc->has_work = arm_cpu_has_work;
131
cc->dump_state = arm_cpu_dump_state;
132
cc->set_pc = arm_cpu_set_pc;
133
+ cc->get_pc = arm_cpu_get_pc;
134
cc->gdb_read_register = arm_cpu_gdb_read_register;
135
cc->gdb_write_register = arm_cpu_gdb_write_register;
136
#ifndef CONFIG_USER_ONLY
137
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
138
index XXXXXXX..XXXXXXX 100644
139
--- a/target/avr/cpu.c
140
+++ b/target/avr/cpu.c
141
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_set_pc(CPUState *cs, vaddr value)
142
cpu->env.pc_w = value / 2; /* internally PC points to words */
143
}
31
}
144
32
145
+static vaddr avr_cpu_get_pc(CPUState *cs)
33
-static inline int get_float_exception_flags(float_status *status)
146
+{
34
+static inline int get_float_exception_flags(const float_status *status)
147
+ AVRCPU *cpu = AVR_CPU(cs);
148
+
149
+ return cpu->env.pc_w * 2;
150
+}
151
+
152
static bool avr_cpu_has_work(CPUState *cs)
153
{
35
{
154
AVRCPU *cpu = AVR_CPU(cs);
36
return status->float_exception_flags;
155
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
156
cc->has_work = avr_cpu_has_work;
157
cc->dump_state = avr_cpu_dump_state;
158
cc->set_pc = avr_cpu_set_pc;
159
+ cc->get_pc = avr_cpu_get_pc;
160
dc->vmsd = &vms_avr_cpu;
161
cc->sysemu_ops = &avr_sysemu_ops;
162
cc->disas_set_info = avr_cpu_disas_set_info;
163
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
164
index XXXXXXX..XXXXXXX 100644
165
--- a/target/cris/cpu.c
166
+++ b/target/cris/cpu.c
167
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_set_pc(CPUState *cs, vaddr value)
168
cpu->env.pc = value;
169
}
37
}
170
38
171
+static vaddr cris_cpu_get_pc(CPUState *cs)
39
static inline FloatX80RoundPrec
172
+{
40
-get_floatx80_rounding_precision(float_status *status)
173
+ CRISCPU *cpu = CRIS_CPU(cs);
41
+get_floatx80_rounding_precision(const float_status *status)
174
+
175
+ return cpu->env.pc;
176
+}
177
+
178
static bool cris_cpu_has_work(CPUState *cs)
179
{
42
{
180
return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
43
return status->floatx80_rounding_precision;
181
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
182
cc->has_work = cris_cpu_has_work;
183
cc->dump_state = cris_cpu_dump_state;
184
cc->set_pc = cris_cpu_set_pc;
185
+ cc->get_pc = cris_cpu_get_pc;
186
cc->gdb_read_register = cris_cpu_gdb_read_register;
187
cc->gdb_write_register = cris_cpu_gdb_write_register;
188
#ifndef CONFIG_USER_ONLY
189
diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c
190
index XXXXXXX..XXXXXXX 100644
191
--- a/target/hexagon/cpu.c
192
+++ b/target/hexagon/cpu.c
193
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_set_pc(CPUState *cs, vaddr value)
194
env->gpr[HEX_REG_PC] = value;
195
}
44
}
196
45
197
+static vaddr hexagon_cpu_get_pc(CPUState *cs)
46
-static inline Float2NaNPropRule get_float_2nan_prop_rule(float_status *status)
198
+{
47
+static inline Float2NaNPropRule
199
+ HexagonCPU *cpu = HEXAGON_CPU(cs);
48
+get_float_2nan_prop_rule(const float_status *status)
200
+ CPUHexagonState *env = &cpu->env;
201
+ return env->gpr[HEX_REG_PC];
202
+}
203
+
204
static void hexagon_cpu_synchronize_from_tb(CPUState *cs,
205
const TranslationBlock *tb)
206
{
49
{
207
@@ -XXX,XX +XXX,XX @@ static void hexagon_cpu_class_init(ObjectClass *c, void *data)
50
return status->float_2nan_prop_rule;
208
cc->has_work = hexagon_cpu_has_work;
209
cc->dump_state = hexagon_dump_state;
210
cc->set_pc = hexagon_cpu_set_pc;
211
+ cc->get_pc = hexagon_cpu_get_pc;
212
cc->gdb_read_register = hexagon_gdb_read_register;
213
cc->gdb_write_register = hexagon_gdb_write_register;
214
cc->gdb_num_core_regs = TOTAL_PER_THREAD_REGS + NUM_VREGS + NUM_QREGS;
215
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
216
index XXXXXXX..XXXXXXX 100644
217
--- a/target/hppa/cpu.c
218
+++ b/target/hppa/cpu.c
219
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_set_pc(CPUState *cs, vaddr value)
220
cpu->env.iaoq_b = value + 4;
221
}
51
}
222
52
223
+static vaddr hppa_cpu_get_pc(CPUState *cs)
53
-static inline Float3NaNPropRule get_float_3nan_prop_rule(float_status *status)
224
+{
54
+static inline Float3NaNPropRule
225
+ HPPACPU *cpu = HPPA_CPU(cs);
55
+get_float_3nan_prop_rule(const float_status *status)
226
+
227
+ return cpu->env.iaoq_f;
228
+}
229
+
230
static void hppa_cpu_synchronize_from_tb(CPUState *cs,
231
const TranslationBlock *tb)
232
{
56
{
233
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
57
return status->float_3nan_prop_rule;
234
cc->has_work = hppa_cpu_has_work;
235
cc->dump_state = hppa_cpu_dump_state;
236
cc->set_pc = hppa_cpu_set_pc;
237
+ cc->get_pc = hppa_cpu_get_pc;
238
cc->gdb_read_register = hppa_cpu_gdb_read_register;
239
cc->gdb_write_register = hppa_cpu_gdb_write_register;
240
#ifndef CONFIG_USER_ONLY
241
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
242
index XXXXXXX..XXXXXXX 100644
243
--- a/target/i386/cpu.c
244
+++ b/target/i386/cpu.c
245
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_set_pc(CPUState *cs, vaddr value)
246
cpu->env.eip = value;
247
}
58
}
248
59
249
+static vaddr x86_cpu_get_pc(CPUState *cs)
60
-static inline FloatInfZeroNaNRule get_float_infzeronan_rule(float_status *status)
250
+{
61
+static inline FloatInfZeroNaNRule
251
+ X86CPU *cpu = X86_CPU(cs);
62
+get_float_infzeronan_rule(const float_status *status)
252
+
253
+ /* Match cpu_get_tb_cpu_state. */
254
+ return cpu->env.eip + cpu->env.segs[R_CS].base;
255
+}
256
+
257
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
258
{
63
{
259
X86CPU *cpu = X86_CPU(cs);
64
return status->float_infzeronan_rule;
260
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
261
cc->has_work = x86_cpu_has_work;
262
cc->dump_state = x86_cpu_dump_state;
263
cc->set_pc = x86_cpu_set_pc;
264
+ cc->get_pc = x86_cpu_get_pc;
265
cc->gdb_read_register = x86_cpu_gdb_read_register;
266
cc->gdb_write_register = x86_cpu_gdb_write_register;
267
cc->get_arch_id = x86_cpu_get_arch_id;
268
diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
269
index XXXXXXX..XXXXXXX 100644
270
--- a/target/loongarch/cpu.c
271
+++ b/target/loongarch/cpu.c
272
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
273
env->pc = value;
274
}
65
}
275
66
276
+static vaddr loongarch_cpu_get_pc(CPUState *cs)
67
-static inline uint8_t get_float_default_nan_pattern(float_status *status)
277
+{
68
+static inline uint8_t get_float_default_nan_pattern(const float_status *status)
278
+ LoongArchCPU *cpu = LOONGARCH_CPU(cs);
69
{
279
+ CPULoongArchState *env = &cpu->env;
70
return status->default_nan_pattern;
280
+
281
+ return env->pc;
282
+}
283
+
284
#ifndef CONFIG_USER_ONLY
285
#include "hw/loongarch/virt.h"
286
287
@@ -XXX,XX +XXX,XX @@ static void loongarch_cpu_class_init(ObjectClass *c, void *data)
288
cc->has_work = loongarch_cpu_has_work;
289
cc->dump_state = loongarch_cpu_dump_state;
290
cc->set_pc = loongarch_cpu_set_pc;
291
+ cc->get_pc = loongarch_cpu_get_pc;
292
#ifndef CONFIG_USER_ONLY
293
dc->vmsd = &vmstate_loongarch_cpu;
294
cc->sysemu_ops = &loongarch_sysemu_ops;
295
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
296
index XXXXXXX..XXXXXXX 100644
297
--- a/target/m68k/cpu.c
298
+++ b/target/m68k/cpu.c
299
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_set_pc(CPUState *cs, vaddr value)
300
cpu->env.pc = value;
301
}
71
}
302
72
303
+static vaddr m68k_cpu_get_pc(CPUState *cs)
73
-static inline bool get_flush_to_zero(float_status *status)
304
+{
74
+static inline bool get_flush_to_zero(const float_status *status)
305
+ M68kCPU *cpu = M68K_CPU(cs);
306
+
307
+ return cpu->env.pc;
308
+}
309
+
310
static bool m68k_cpu_has_work(CPUState *cs)
311
{
75
{
312
return cs->interrupt_request & CPU_INTERRUPT_HARD;
76
return status->flush_to_zero;
313
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
314
cc->has_work = m68k_cpu_has_work;
315
cc->dump_state = m68k_cpu_dump_state;
316
cc->set_pc = m68k_cpu_set_pc;
317
+ cc->get_pc = m68k_cpu_get_pc;
318
cc->gdb_read_register = m68k_cpu_gdb_read_register;
319
cc->gdb_write_register = m68k_cpu_gdb_write_register;
320
#if defined(CONFIG_SOFTMMU)
321
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
322
index XXXXXXX..XXXXXXX 100644
323
--- a/target/microblaze/cpu.c
324
+++ b/target/microblaze/cpu.c
325
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_set_pc(CPUState *cs, vaddr value)
326
cpu->env.iflags = 0;
327
}
77
}
328
78
329
+static vaddr mb_cpu_get_pc(CPUState *cs)
79
-static inline bool get_flush_inputs_to_zero(float_status *status)
330
+{
80
+static inline bool get_flush_inputs_to_zero(const float_status *status)
331
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
332
+
333
+ return cpu->env.pc;
334
+}
335
+
336
static void mb_cpu_synchronize_from_tb(CPUState *cs,
337
const TranslationBlock *tb)
338
{
81
{
339
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
82
return status->flush_inputs_to_zero;
340
341
cc->dump_state = mb_cpu_dump_state;
342
cc->set_pc = mb_cpu_set_pc;
343
+ cc->get_pc = mb_cpu_get_pc;
344
cc->gdb_read_register = mb_cpu_gdb_read_register;
345
cc->gdb_write_register = mb_cpu_gdb_write_register;
346
347
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
348
index XXXXXXX..XXXXXXX 100644
349
--- a/target/mips/cpu.c
350
+++ b/target/mips/cpu.c
351
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_set_pc(CPUState *cs, vaddr value)
352
mips_env_set_pc(&cpu->env, value);
353
}
83
}
354
84
355
+static vaddr mips_cpu_get_pc(CPUState *cs)
85
-static inline bool get_default_nan_mode(float_status *status)
356
+{
86
+static inline bool get_default_nan_mode(const float_status *status)
357
+ MIPSCPU *cpu = MIPS_CPU(cs);
358
+
359
+ return cpu->env.active_tc.PC;
360
+}
361
+
362
static bool mips_cpu_has_work(CPUState *cs)
363
{
87
{
364
MIPSCPU *cpu = MIPS_CPU(cs);
88
return status->default_nan_mode;
365
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
366
cc->has_work = mips_cpu_has_work;
367
cc->dump_state = mips_cpu_dump_state;
368
cc->set_pc = mips_cpu_set_pc;
369
+ cc->get_pc = mips_cpu_get_pc;
370
cc->gdb_read_register = mips_cpu_gdb_read_register;
371
cc->gdb_write_register = mips_cpu_gdb_write_register;
372
#ifndef CONFIG_USER_ONLY
373
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
374
index XXXXXXX..XXXXXXX 100644
375
--- a/target/nios2/cpu.c
376
+++ b/target/nios2/cpu.c
377
@@ -XXX,XX +XXX,XX @@ static void nios2_cpu_set_pc(CPUState *cs, vaddr value)
378
env->pc = value;
379
}
89
}
380
381
+static vaddr nios2_cpu_get_pc(CPUState *cs)
382
+{
383
+ Nios2CPU *cpu = NIOS2_CPU(cs);
384
+ CPUNios2State *env = &cpu->env;
385
+
386
+ return env->pc;
387
+}
388
+
389
static bool nios2_cpu_has_work(CPUState *cs)
390
{
391
return cs->interrupt_request & CPU_INTERRUPT_HARD;
392
@@ -XXX,XX +XXX,XX @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)
393
cc->has_work = nios2_cpu_has_work;
394
cc->dump_state = nios2_cpu_dump_state;
395
cc->set_pc = nios2_cpu_set_pc;
396
+ cc->get_pc = nios2_cpu_get_pc;
397
cc->disas_set_info = nios2_cpu_disas_set_info;
398
#ifndef CONFIG_USER_ONLY
399
cc->sysemu_ops = &nios2_sysemu_ops;
400
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
401
index XXXXXXX..XXXXXXX 100644
402
--- a/target/openrisc/cpu.c
403
+++ b/target/openrisc/cpu.c
404
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_set_pc(CPUState *cs, vaddr value)
405
cpu->env.dflag = 0;
406
}
407
408
+static vaddr openrisc_cpu_get_pc(CPUState *cs)
409
+{
410
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
411
+
412
+ return cpu->env.pc;
413
+}
414
+
415
static void openrisc_cpu_synchronize_from_tb(CPUState *cs,
416
const TranslationBlock *tb)
417
{
418
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
419
cc->has_work = openrisc_cpu_has_work;
420
cc->dump_state = openrisc_cpu_dump_state;
421
cc->set_pc = openrisc_cpu_set_pc;
422
+ cc->get_pc = openrisc_cpu_get_pc;
423
cc->gdb_read_register = openrisc_cpu_gdb_read_register;
424
cc->gdb_write_register = openrisc_cpu_gdb_write_register;
425
#ifndef CONFIG_USER_ONLY
426
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
427
index XXXXXXX..XXXXXXX 100644
428
--- a/target/ppc/cpu_init.c
429
+++ b/target/ppc/cpu_init.c
430
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_set_pc(CPUState *cs, vaddr value)
431
cpu->env.nip = value;
432
}
433
434
+static vaddr ppc_cpu_get_pc(CPUState *cs)
435
+{
436
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
437
+
438
+ return cpu->env.nip;
439
+}
440
+
441
static bool ppc_cpu_has_work(CPUState *cs)
442
{
443
PowerPCCPU *cpu = POWERPC_CPU(cs);
444
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
445
cc->has_work = ppc_cpu_has_work;
446
cc->dump_state = ppc_cpu_dump_state;
447
cc->set_pc = ppc_cpu_set_pc;
448
+ cc->get_pc = ppc_cpu_get_pc;
449
cc->gdb_read_register = ppc_cpu_gdb_read_register;
450
cc->gdb_write_register = ppc_cpu_gdb_write_register;
451
#ifndef CONFIG_USER_ONLY
452
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
453
index XXXXXXX..XXXXXXX 100644
454
--- a/target/riscv/cpu.c
455
+++ b/target/riscv/cpu.c
456
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
457
}
458
}
459
460
+static vaddr riscv_cpu_get_pc(CPUState *cs)
461
+{
462
+ RISCVCPU *cpu = RISCV_CPU(cs);
463
+ CPURISCVState *env = &cpu->env;
464
+
465
+ /* Match cpu_get_tb_cpu_state. */
466
+ if (env->xl == MXL_RV32) {
467
+ return env->pc & UINT32_MAX;
468
+ }
469
+ return env->pc;
470
+}
471
+
472
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
473
const TranslationBlock *tb)
474
{
475
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
476
cc->has_work = riscv_cpu_has_work;
477
cc->dump_state = riscv_cpu_dump_state;
478
cc->set_pc = riscv_cpu_set_pc;
479
+ cc->get_pc = riscv_cpu_get_pc;
480
cc->gdb_read_register = riscv_cpu_gdb_read_register;
481
cc->gdb_write_register = riscv_cpu_gdb_write_register;
482
cc->gdb_num_core_regs = 33;
483
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
484
index XXXXXXX..XXXXXXX 100644
485
--- a/target/rx/cpu.c
486
+++ b/target/rx/cpu.c
487
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_set_pc(CPUState *cs, vaddr value)
488
cpu->env.pc = value;
489
}
490
491
+static vaddr rx_cpu_get_pc(CPUState *cs)
492
+{
493
+ RXCPU *cpu = RX_CPU(cs);
494
+
495
+ return cpu->env.pc;
496
+}
497
+
498
static void rx_cpu_synchronize_from_tb(CPUState *cs,
499
const TranslationBlock *tb)
500
{
501
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)
502
cc->has_work = rx_cpu_has_work;
503
cc->dump_state = rx_cpu_dump_state;
504
cc->set_pc = rx_cpu_set_pc;
505
+ cc->get_pc = rx_cpu_get_pc;
506
507
#ifndef CONFIG_USER_ONLY
508
cc->sysemu_ops = &rx_sysemu_ops;
509
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
510
index XXXXXXX..XXXXXXX 100644
511
--- a/target/s390x/cpu.c
512
+++ b/target/s390x/cpu.c
513
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_set_pc(CPUState *cs, vaddr value)
514
cpu->env.psw.addr = value;
515
}
516
517
+static vaddr s390_cpu_get_pc(CPUState *cs)
518
+{
519
+ S390CPU *cpu = S390_CPU(cs);
520
+
521
+ return cpu->env.psw.addr;
522
+}
523
+
524
static bool s390_cpu_has_work(CPUState *cs)
525
{
526
S390CPU *cpu = S390_CPU(cs);
527
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
528
cc->has_work = s390_cpu_has_work;
529
cc->dump_state = s390_cpu_dump_state;
530
cc->set_pc = s390_cpu_set_pc;
531
+ cc->get_pc = s390_cpu_get_pc;
532
cc->gdb_read_register = s390_cpu_gdb_read_register;
533
cc->gdb_write_register = s390_cpu_gdb_write_register;
534
#ifndef CONFIG_USER_ONLY
535
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
536
index XXXXXXX..XXXXXXX 100644
537
--- a/target/sh4/cpu.c
538
+++ b/target/sh4/cpu.c
539
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_set_pc(CPUState *cs, vaddr value)
540
cpu->env.pc = value;
541
}
542
543
+static vaddr superh_cpu_get_pc(CPUState *cs)
544
+{
545
+ SuperHCPU *cpu = SUPERH_CPU(cs);
546
+
547
+ return cpu->env.pc;
548
+}
549
+
550
static void superh_cpu_synchronize_from_tb(CPUState *cs,
551
const TranslationBlock *tb)
552
{
553
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
554
cc->has_work = superh_cpu_has_work;
555
cc->dump_state = superh_cpu_dump_state;
556
cc->set_pc = superh_cpu_set_pc;
557
+ cc->get_pc = superh_cpu_get_pc;
558
cc->gdb_read_register = superh_cpu_gdb_read_register;
559
cc->gdb_write_register = superh_cpu_gdb_write_register;
560
#ifndef CONFIG_USER_ONLY
561
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
562
index XXXXXXX..XXXXXXX 100644
563
--- a/target/sparc/cpu.c
564
+++ b/target/sparc/cpu.c
565
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_set_pc(CPUState *cs, vaddr value)
566
cpu->env.npc = value + 4;
567
}
568
569
+static vaddr sparc_cpu_get_pc(CPUState *cs)
570
+{
571
+ SPARCCPU *cpu = SPARC_CPU(cs);
572
+
573
+ return cpu->env.pc;
574
+}
575
+
576
static void sparc_cpu_synchronize_from_tb(CPUState *cs,
577
const TranslationBlock *tb)
578
{
579
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
580
cc->memory_rw_debug = sparc_cpu_memory_rw_debug;
581
#endif
582
cc->set_pc = sparc_cpu_set_pc;
583
+ cc->get_pc = sparc_cpu_get_pc;
584
cc->gdb_read_register = sparc_cpu_gdb_read_register;
585
cc->gdb_write_register = sparc_cpu_gdb_write_register;
586
#ifndef CONFIG_USER_ONLY
587
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
588
index XXXXXXX..XXXXXXX 100644
589
--- a/target/tricore/cpu.c
590
+++ b/target/tricore/cpu.c
591
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_set_pc(CPUState *cs, vaddr value)
592
env->PC = value & ~(target_ulong)1;
593
}
594
595
+static vaddr tricore_cpu_get_pc(CPUState *cs)
596
+{
597
+ TriCoreCPU *cpu = TRICORE_CPU(cs);
598
+ CPUTriCoreState *env = &cpu->env;
599
+
600
+ return env->PC;
601
+}
602
+
603
static void tricore_cpu_synchronize_from_tb(CPUState *cs,
604
const TranslationBlock *tb)
605
{
606
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)
607
608
cc->dump_state = tricore_cpu_dump_state;
609
cc->set_pc = tricore_cpu_set_pc;
610
+ cc->get_pc = tricore_cpu_get_pc;
611
cc->sysemu_ops = &tricore_sysemu_ops;
612
cc->tcg_ops = &tricore_tcg_ops;
613
}
614
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
615
index XXXXXXX..XXXXXXX 100644
616
--- a/target/xtensa/cpu.c
617
+++ b/target/xtensa/cpu.c
618
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_set_pc(CPUState *cs, vaddr value)
619
cpu->env.pc = value;
620
}
621
622
+static vaddr xtensa_cpu_get_pc(CPUState *cs)
623
+{
624
+ XtensaCPU *cpu = XTENSA_CPU(cs);
625
+
626
+ return cpu->env.pc;
627
+}
628
+
629
static bool xtensa_cpu_has_work(CPUState *cs)
630
{
631
#ifndef CONFIG_USER_ONLY
632
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
633
cc->has_work = xtensa_cpu_has_work;
634
cc->dump_state = xtensa_cpu_dump_state;
635
cc->set_pc = xtensa_cpu_set_pc;
636
+ cc->get_pc = xtensa_cpu_get_pc;
637
cc->gdb_read_register = xtensa_cpu_gdb_read_register;
638
cc->gdb_write_register = xtensa_cpu_gdb_write_register;
639
cc->gdb_stop_before_watchpoint = true;
640
--
90
--
641
2.34.1
91
2.43.0
642
92
643
93
diff view generated by jsdifflib