The following changes since commit 005ad32358f12fe9313a4a01918a55e60d4f39e5:

  Merge tag 'pull-tpm-2023-09-12-3' of https://github.com/stefanberger/qemu-tpm into staging (2023-09-13 13:41:57 -0400)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230915

for you to fetch changes up to e0d9f49c143359b4a34cb80737af57228c62a008:

  accel/tcg: Restrict tcg_exec_[un]realizefn() to TCG (2023-09-15 19:06:29 -0700)

----------------------------------------------------------------
*: Delete checks for old host definitions
tcg/loongarch64: Generate LSX instructions
fpu: Add conversions between bfloat16 and [u]int8
fpu: Handle m68k extended precision denormals properly
accel/tcg: Improve cputlb i/o organization
accel/tcg: Simplify tlb_plugin_lookup
accel/tcg: Remove false-negative halted assertion
tcg: Add gvec compare with immediate and scalar operand
tcg/aarch64: Emit BTI insns at jump landing pads

----------------------------------------------------------------
Akihiko Odaki (3):
      util: Delete checks for old host definitions
      softmmu: Delete checks for old host definitions
      thunk: Delete checks for old host definitions

Anton Johansson (9):
      target/arm: Replace TARGET_PAGE_ENTRY_EXTRA
      accel/tcg: Modify tlb_*() to use CPUState
      accel/tcg: Modify probe_access_internal() to use CPUState
      accel/tcg: Modify memory access functions to use CPUState
      accel/tcg: Modify atomic_mmu_lookup() to use CPUState
      accel/tcg: Use CPUState in atomicity helpers
      accel/tcg: Remove env_tlb()
      accel/tcg: Unify user and softmmu do_[st|ld]*_mmu()
      accel/tcg: move ld/st helpers to ldst_common.c.inc

Jiajie Chen (16):
      tcg/loongarch64: Import LSX instructions
      tcg/loongarch64: Lower basic tcg vec ops to LSX
      tcg: pass vece to tcg_target_const_match()
      tcg/loongarch64: Lower cmp_vec to vseq/vsle/vslt
      tcg/loongarch64: Lower add/sub_vec to vadd/vsub
      tcg/loongarch64: Lower vector bitwise operations
      tcg/loongarch64: Lower neg_vec to vneg
      tcg/loongarch64: Lower mul_vec to vmul
      tcg/loongarch64: Lower vector min max ops
      tcg/loongarch64: Lower vector saturated ops
      tcg/loongarch64: Lower vector shift vector ops
      tcg/loongarch64: Lower bitsel_vec to vbitsel
      tcg/loongarch64: Lower vector shift integer ops
      tcg/loongarch64: Lower rotv_vec ops to LSX
      tcg/loongarch64: Lower rotli_vec to vrotri
      tcg/loongarch64: Implement 128-bit load & store

LIU Zhiwei (2):
      accel/tcg: Fix the comment for CPUTLBEntryFull
      fpu: Add conversions between bfloat16 and [u]int8

Nicholas Piggin (1):
      accel/tcg: mttcg remove false-negative halted assertion

Philippe Mathieu-Daudé (16):
      exec: Make EXCP_FOO definitions target agnostic
      exec: Move cpu_loop_foo() target agnostic functions to 'cpu-common.h'
      accel/tcg: Restrict dump_exec_info() declaration
      accel: Make accel-blocker.o target agnostic
      accel: Rename accel-common.c -> accel-target.c
      exec: Rename cpu.c -> cpu-target.c
      exec: Rename target specific page-vary.c -> page-vary-target.c
      accel/tcg: Rename target-specific 'internal.h' -> 'internal-target.h'
      accel/tcg: Make monitor.c a target-agnostic unit
      accel/tcg: Make icount.o a target agnostic unit
      accel/tcg: Make cpu-exec-common.c a target agnostic unit
      accel: Rename accel_cpu_realizefn() -> accel_cpu_realize()
      accel: Introduce accel_cpu_unrealize() stub
      accel: Declare AccelClass::[un]realize_cpu() handlers
      accel/tcg: Have tcg_exec_realizefn() return a boolean
      accel/tcg: Restrict tcg_exec_[un]realizefn() to TCG

Richard Henderson (31):
      tcg: Add gvec compare with immediate and scalar operand
      target/arm: Use tcg_gen_gvec_cmpi for compare vs 0
      accel/tcg: Simplify tlb_plugin_lookup
      accel/tcg: Split out io_prepare and io_failed
      accel/tcg: Use CPUTLBEntryFull.phys_addr in io_failed
      plugin: Simplify struct qemu_plugin_hwaddr
      accel/tcg: Merge cpu_transaction_failed into io_failed
      accel/tcg: Replace direct use of io_readx/io_writex in do_{ld,st}_1
      accel/tcg: Merge io_readx into do_ld_mmio_beN
      accel/tcg: Merge io_writex into do_st_mmio_leN
      accel/tcg: Introduce do_ld16_mmio_beN
      accel/tcg: Introduce do_st16_mmio_leN
      fpu: Handle m68k extended precision denormals properly
      tcg: Add tcg_out_tb_start backend hook
      util/cpuinfo-aarch64: Add CPUINFO_BTI
      tcg/aarch64: Emit BTI insns at jump landing pads
      tcg: Map code_gen_buffer with PROT_BTI
      accel/tcg: Move CPUTLB definitions from cpu-defs.h
      qom: Propagate alignment through type system
      target/arm: Remove size and alignment for cpu subclasses
      target/*: Add instance_align to all cpu base classes
      accel/tcg: Validate placement of CPUNegativeOffsetState
      accel/tcg: Move CPUNegativeOffsetState into CPUState
      accel/tcg: Remove CPUState.icount_decr_ptr
      accel/tcg: Move can_do_io to CPUNegativeOffsetState
      accel/tcg: Remove cpu_neg()
      tcg: Rename cpu_env to tcg_env
      accel/tcg: Replace CPUState.env_ptr with cpu_env()
      accel/tcg: Remove cpu_set_cpustate_pointers
      accel/tcg: Remove env_neg()
      tcg: Remove TCGContext.tlb_fast_offset

 MAINTAINERS | 7 +-
 meson.build | 6 +-
 accel/tcg/atomic_template.h | 20 +-
 accel/tcg/internal-common.h | 28 +
 accel/tcg/{internal.h => internal-target.h} | 21 +-
 accel/tcg/tcg-runtime.h | 25 +
 host/include/aarch64/host/cpuinfo.h | 1 +
 include/exec/cpu-all.h | 67 +-
 include/exec/cpu-common.h | 39 +
 include/exec/cpu-defs.h | 138 -
 include/exec/cpu_ldst.h | 8 +-
 include/exec/exec-all.h | 32 +-
 include/exec/user/thunk.h | 3 +-
 include/fpu/softfloat.h | 12 +
 include/hw/core/cpu.h | 180 +-
 include/qemu/accel.h | 12 +-
 include/qemu/plugin-memory.h | 11 +-
 include/qemu/typedefs.h | 1 -
 include/tcg/tcg-op-gvec-common.h | 6 +
 include/tcg/tcg.h | 3 +-
 target/alpha/cpu.h | 1 -
 target/arm/common-semi-target.h | 2 +-
 target/arm/cpu-param.h | 12 -
 target/arm/cpu.h | 1 -
 target/arm/tcg/translate-a32.h | 2 +-
 target/arm/tcg/translate-a64.h | 4 +-
 target/arm/tcg/translate.h | 16 +-
 target/avr/cpu.h | 1 -
 target/cris/cpu.h | 1 -
 target/hexagon/cpu.h | 2 +-
 target/hexagon/gen_tcg.h | 120 +-
 target/hexagon/gen_tcg_hvx.h | 20 +-
 target/hexagon/macros.h | 8 +-
 target/hppa/cpu.h | 1 -
 target/i386/cpu.h | 1 -
 target/loongarch/cpu.h | 1 -
 target/m68k/cpu.h | 1 -
 target/microblaze/cpu.h | 6 +-
 target/mips/cpu.h | 4 +-
 target/mips/tcg/translate.h | 6 +-
 target/nios2/cpu.h | 1 -
 target/openrisc/cpu.h | 1 -
 target/ppc/cpu.h | 1 -
 target/riscv/cpu.h | 2 +-
 target/rx/cpu.h | 1 -
 target/s390x/cpu.h | 1 -
 target/sh4/cpu.h | 1 -
 target/sparc/cpu.h | 1 -
 target/tricore/cpu.h | 1 -
 target/xtensa/cpu.h | 3 +-
 tcg/loongarch64/tcg-target-con-set.h | 9 +
 tcg/loongarch64/tcg-target-con-str.h | 3 +
 tcg/loongarch64/tcg-target.h | 40 +-
 tcg/loongarch64/tcg-target.opc.h | 12 +
 accel/{accel-common.c => accel-target.c} | 27 +-
 accel/dummy-cpus.c | 2 +-
 accel/kvm/kvm-accel-ops.c | 2 +-
 accel/tcg/cpu-exec-common.c | 5 +-
 accel/tcg/cpu-exec.c | 31 +-
 accel/tcg/cputlb.c | 1156 ++--
 softmmu/icount.c => accel/tcg/icount-common.c | 7 +-
 accel/tcg/monitor.c | 2 +-
 accel/tcg/plugin-gen.c | 10 +-
 accel/tcg/tb-maint.c | 3 +-
 accel/tcg/tcg-accel-ops-icount.c | 8 +-
 accel/tcg/tcg-accel-ops-mttcg.c | 11 +-
 accel/tcg/tcg-accel-ops-rr.c | 4 +-
 accel/tcg/tcg-accel-ops.c | 2 +-
 accel/tcg/tcg-all.c | 4 +-
 accel/tcg/tcg-runtime-gvec.c | 26 +
 accel/tcg/translate-all.c | 15 +-
 accel/tcg/translator.c | 22 +-
 accel/tcg/user-exec.c | 279 +-
 bsd-user/main.c | 2 +-
 bsd-user/signal.c | 10 +-
 cpus-common.c => cpu-common.c | 0
 cpu.c => cpu-target.c | 13 +-
 fpu/softfloat.c | 67 +-
 gdbstub/gdbstub.c | 4 +-
 gdbstub/user-target.c | 2 +-
 hw/core/cpu-common.c | 6 +-
 hw/i386/kvm/clock.c | 2 +-
 hw/intc/mips_gic.c | 2 +-
 hw/intc/riscv_aclint.c | 12 +-
 hw/intc/riscv_imsic.c | 2 +-
 hw/ppc/e500.c | 4 +-
 hw/ppc/spapr.c | 2 +-
 linux-user/elfload.c | 4 +-
 linux-user/i386/cpu_loop.c | 2 +-
 linux-user/main.c | 4 +-
 linux-user/signal.c | 15 +-
 monitor/hmp-cmds-target.c | 2 +-
 page-vary.c => page-vary-target.c | 0
 plugins/api.c | 27 +-
 qom/object.c | 14 +
 semihosting/arm-compat-semi.c | 6 +-
 semihosting/syscalls.c | 28 +-
 softmmu/async-teardown.c | 3 -
 softmmu/watchpoint.c | 2 +-
 target/alpha/cpu.c | 3 +-
 target/alpha/translate.c | 146 +-
 target/arm/cpu.c | 12 +-
 target/arm/cpu64.c | 4 -
 target/arm/helper.c | 2 +-
 target/arm/ptw.c | 4 +-
 target/arm/tcg/mte_helper.c | 2 +-
 target/arm/tcg/sve_helper.c | 2 +-
 target/arm/tcg/tlb_helper.c | 4 +-
 target/arm/tcg/translate-a64.c | 380 +-
 target/arm/tcg/translate-m-nocp.c | 24 +-
 target/arm/tcg/translate-mve.c | 52 +-
 target/arm/tcg/translate-neon.c | 78 +-
 target/arm/tcg/translate-sme.c | 8 +-
 target/arm/tcg/translate-sve.c | 172 +-
 target/arm/tcg/translate-vfp.c | 56 +-
 target/arm/tcg/translate.c | 290 +-
 target/avr/cpu.c | 3 +-
 target/avr/translate.c | 66 +-
 target/cris/cpu.c | 3 +-
 target/cris/translate.c | 72 +-
 target/hexagon/cpu.c | 4 +-
 target/hexagon/genptr.c | 36 +-
 target/hexagon/idef-parser/parser-helpers.c | 2 +-
 target/hexagon/translate.c | 52 +-
 target/hppa/cpu.c | 2 +-
 target/hppa/mem_helper.c | 2 +-
 target/hppa/translate.c | 148 +-
 target/i386/cpu.c | 2 +-
 target/i386/kvm/kvm-cpu.c | 2 +-
 target/i386/nvmm/nvmm-all.c | 14 +-
 target/i386/tcg/sysemu/excp_helper.c | 2 +-
 target/i386/tcg/tcg-cpu.c | 2 +-
 target/i386/tcg/translate.c | 584 +-
 target/i386/whpx/whpx-all.c | 26 +-
 target/loongarch/cpu.c | 9 +-
 target/loongarch/translate.c | 22 +-
 target/m68k/cpu.c | 9 +-
 target/m68k/translate.c | 306 +-
 target/microblaze/cpu.c | 2 +-
 target/microblaze/translate.c | 52 +-
 target/mips/cpu.c | 2 +-
 target/mips/tcg/lcsr_translate.c | 6 +-
 target/mips/tcg/msa_translate.c | 34 +-
 target/mips/tcg/mxu_translate.c | 4 +-
 target/mips/tcg/sysemu/mips-semi.c | 4 +-
 target/mips/tcg/translate.c | 1288 ++---
 target/mips/tcg/vr54xx_translate.c | 2 +-
 target/nios2/cpu.c | 5 +-
 target/nios2/translate.c | 52 +-
 target/openrisc/cpu.c | 7 +-
 target/openrisc/translate.c | 86 +-
 target/ppc/cpu_init.c | 1 -
 target/ppc/excp_helper.c | 10 +-
 target/ppc/translate.c | 366 +-
 target/riscv/cpu.c | 8 +-
 target/riscv/translate.c | 56 +-
 target/rx/cpu.c | 5 +-
 target/rx/translate.c | 58 +-
 target/s390x/cpu.c | 2 -
 target/s390x/tcg/translate.c | 426 +-
 target/sh4/cpu.c | 3 +-
 target/sh4/op_helper.c | 2 +-
 target/sh4/translate.c | 128 +-
 target/sparc/cpu.c | 3 +-
 target/sparc/translate.c | 332 +-
 target/tricore/cpu.c | 10 +-
 target/tricore/translate.c | 224 +-
 target/xtensa/cpu.c | 2 +-
 target/xtensa/translate.c | 192 +-
 tcg/region.c | 41 +-
 tcg/tcg-op-gvec.c | 437 +-
 tcg/tcg-op-ldst.c | 22 +-
 tcg/tcg-op.c | 2 +-
 tcg/tcg.c | 26 +-
 tests/tcg/m68k/denormal.c | 53 +
 util/cpuinfo-aarch64.c | 7 +
 util/oslib-posix.c | 15 +-
 accel/tcg/ldst_atomicity.c.inc | 88 +-
 accel/tcg/ldst_common.c.inc | 225 +
 fpu/softfloat-parts.c.inc | 7 +-
 target/cris/translate_v10.c.inc | 28 +-
 target/i386/tcg/decode-new.c.inc | 4 +-
 target/i386/tcg/emit.c.inc | 262 +-
 target/loongarch/insn_trans/trans_atomic.c.inc | 4 +-
 target/loongarch/insn_trans/trans_branch.c.inc | 2 +-
 target/loongarch/insn_trans/trans_extra.c.inc | 10 +-
 target/loongarch/insn_trans/trans_farith.c.inc | 6 +-
 target/loongarch/insn_trans/trans_fcmp.c.inc | 8 +-
 target/loongarch/insn_trans/trans_fmemory.c.inc | 8 +-
 target/loongarch/insn_trans/trans_fmov.c.inc | 20 +-
 target/loongarch/insn_trans/trans_lsx.c.inc | 44 +-
 target/loongarch/insn_trans/trans_memory.c.inc | 8 +-
 target/loongarch/insn_trans/trans_privileged.c.inc | 52 +-
 target/mips/tcg/micromips_translate.c.inc | 12 +-
 target/mips/tcg/nanomips_translate.c.inc | 200 +-
 target/ppc/power8-pmu-regs.c.inc | 8 +-
 target/ppc/translate/branch-impl.c.inc | 2 +-
 target/ppc/translate/dfp-impl.c.inc | 22 +-
 target/ppc/translate/fixedpoint-impl.c.inc | 2 +-
 target/ppc/translate/fp-impl.c.inc | 50 +-
 target/ppc/translate/processor-ctrl-impl.c.inc | 8 +-
 target/ppc/translate/spe-impl.c.inc | 30 +-
 target/ppc/translate/storage-ctrl-impl.c.inc | 26 +-
 target/ppc/translate/vmx-impl.c.inc | 34 +-
 target/ppc/translate/vsx-impl.c.inc | 54 +-
 target/riscv/insn_trans/trans_privileged.c.inc | 8 +-
 target/riscv/insn_trans/trans_rvbf16.c.inc | 10 +-
 target/riscv/insn_trans/trans_rvd.c.inc | 48 +-
 target/riscv/insn_trans/trans_rvf.c.inc | 46 +-
 target/riscv/insn_trans/trans_rvh.c.inc | 8 +-
 target/riscv/insn_trans/trans_rvi.c.inc | 16 +-
 target/riscv/insn_trans/trans_rvm.c.inc | 16 +-
 target/riscv/insn_trans/trans_rvv.c.inc | 130 +-
 target/riscv/insn_trans/trans_rvvk.c.inc | 30 +-
 target/riscv/insn_trans/trans_rvzce.c.inc | 2 +-
 target/riscv/insn_trans/trans_rvzfa.c.inc | 38 +-
 target/riscv/insn_trans/trans_rvzfh.c.inc | 54 +-
 target/riscv/insn_trans/trans_rvzicbo.c.inc | 8 +-
 target/riscv/insn_trans/trans_svinval.c.inc | 6 +-
 target/riscv/insn_trans/trans_xthead.c.inc | 2 +-
 target/s390x/tcg/translate_vx.c.inc | 104 +-
 tcg/aarch64/tcg-target.c.inc | 61 +-
 tcg/arm/tcg-target.c.inc | 9 +-
 tcg/i386/tcg-target.c.inc | 7 +-
 tcg/loongarch64/tcg-insn-defs.c.inc | 6019 +++++++++++++++++++-
 tcg/loongarch64/tcg-target.c.inc | 628 +-
 tcg/mips/tcg-target.c.inc | 7 +-
 tcg/ppc/tcg-target.c.inc | 7 +-
 tcg/riscv/tcg-target.c.inc | 7 +-
 tcg/s390x/tcg-target.c.inc | 7 +-
 tcg/sparc64/tcg-target.c.inc | 7 +-
 tcg/tci/tcg-target.c.inc | 7 +-
 accel/meson.build | 4 +-
 accel/tcg/meson.build | 8 +-
 softmmu/meson.build | 4 -
 target/hexagon/README | 10 +-
 target/hexagon/gen_tcg_funcs.py | 16 +-
 tests/tcg/m68k/Makefile.target | 2 +-
 238 files changed, 12363 insertions(+), 5537 deletions(-)
 create mode 100644 accel/tcg/internal-common.h
 rename accel/tcg/{internal.h => internal-target.h} (89%)
 create mode 100644 tcg/loongarch64/tcg-target.opc.h
 rename accel/{accel-common.c => accel-target.c} (87%)
 rename softmmu/icount.c => accel/tcg/icount-common.c (99%)
 rename cpus-common.c => cpu-common.c (100%)
 rename cpu.c => cpu-target.c (97%)
 rename page-vary.c => page-vary-target.c (100%)
 create mode 100644 tests/tcg/m68k/denormal.c
From: Nicholas Piggin <npiggin@gmail.com>

mttcg asserts that an execution ending with EXCP_HALTED must have
cpu->halted. However, between the event or instruction that sets
cpu->halted and requests exit, and the assertion here, an
asynchronous event could clear cpu->halted.

This leads to crashes running AIX on ppc/pseries because it uses
H_CEDE/H_PROD hcalls, where H_CEDE sets self->halted = 1 and
H_PROD sets other cpu->halted = 0 and kicks it.

H_PROD could be turned into an interrupt to wake, but several other
places in ppc, sparc, and semihosting follow what looks like a similar
pattern setting halted = 0 directly. So remove this assertion.

Reported-by: Ivan Warren <ivan@vmfacility.fr>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Message-Id: <20230829010658.8252-1-npiggin@gmail.com>
[rth: Keep the case label and adjust the comment.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tcg-accel-ops-mttcg.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -XXX,XX +XXX,XX @@ static void *mttcg_cpu_thread_fn(void *arg)
                 break;
             case EXCP_HALTED:
                 /*
-                 * during start-up the vCPU is reset and the thread is
-                 * kicked several times. If we don't ensure we go back
-                 * to sleep in the halted state we won't cleanly
-                 * start-up when the vCPU is enabled.
-                 *
-                 * cpu->halted should ensure we sleep in wait_io_event
+                 * Usually cpu->halted is set, but may have already been
+                 * reset by another thread by the time we arrive here.
                  */
-                g_assert(cpu->halted);
                 break;
             case EXCP_ATOMIC:
                 qemu_mutex_unlock_iothread();
-- 
2.34.1
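
To make the window in the patch above concrete, here is a minimal sketch of
the H_CEDE/H_PROD interaction; the two helpers are hypothetical stand-ins
for the real spapr hcall code, though cpu_exit() and qemu_cpu_kick() are the
actual QEMU primitives:

    /* Hypothetical sketch of the race; not the real spapr hcall code. */

    static void h_cede_sketch(CPUState *cpu)     /* runs on vCPU A */
    {
        cpu->halted = 1;
        cpu_exit(cpu);       /* A's thread will come back with EXCP_HALTED */
    }

    static void h_prod_sketch(CPUState *other)   /* runs on vCPU B */
    {
        other->halted = 0;   /* can happen before A's thread reaches the
                              * EXCP_HALTED case, so the old
                              * g_assert(cpu->halted) could fire */
        qemu_cpu_kick(other);
    }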
From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>

When the memory region is ram, the lower TARGET_PAGE_BITS are not the
physical section number. Instead, their value is always 0.

Add a comment and an assert to make this clear.

Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Message-Id: <20230901060118.379-1-zhiwei_liu@linux.alibaba.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-defs.h | 12 ++++++------
 accel/tcg/cputlb.c | 11 +++++++----
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -XXX,XX +XXX,XX @@
 typedef struct CPUTLBEntryFull {
     /*
      * @xlat_section contains:
-     *  - in the lower TARGET_PAGE_BITS, a physical section number
-     *  - with the lower TARGET_PAGE_BITS masked off, an offset which
-     *    must be added to the virtual address to obtain:
-     *     + the ram_addr_t of the target RAM (if the physical section
-     *       number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
-     *     + the offset within the target MemoryRegion (otherwise)
+     *  - For ram, an offset which must be added to the virtual address
+     *    to obtain the ram_addr_t of the target RAM
+     *  - For other memory regions,
+     *     + in the lower TARGET_PAGE_BITS, the physical section number
+     *     + with the TARGET_PAGE_BITS masked off, the offset within
+     *       the target MemoryRegion
      */
     hwaddr xlat_section;

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
     write_flags = read_flags;
     if (is_ram) {
         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
+        assert(!(iotlb & ~TARGET_PAGE_MASK));
         /*
          * Computing is_clean is expensive; avoid all that unless
          * the page is actually writable.
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
 
     /* refill the tlb */
     /*
-     * At this point iotlb contains a physical section number in the lower
-     * TARGET_PAGE_BITS, and either
-     *  + the ram_addr_t of the page base of the target RAM (RAM)
-     *  + the offset within section->mr of the page base (I/O, ROMD)
+     * When memory region is ram, iotlb contains a TARGET_PAGE_BITS
+     * aligned ram_addr_t of the page base of the target RAM.
+     * Otherwise, iotlb contains
+     *  - a physical section number in the lower TARGET_PAGE_BITS
+     *  - the offset within section->mr of the page base (I/O, ROMD) with the
+     *    TARGET_PAGE_BITS masked off.
      * We subtract addr_page (which is page aligned and thus won't
      * disturb the low bits) to give an offset which can be added to the
      * (non-page-aligned) vaddr of the eventual memory access to get
-- 
2.34.1
From: Akihiko Odaki <akihiko.odaki@daynix.com>

IA-64 and PA-RISC host support was already removed with commit
b1cef6d02f ("Drop remaining bits of ia64 host support").

Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Message-Id: <20230810225922.21600-1-akihiko.odaki@daynix.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 util/oslib-posix.c | 15 +++------------
 1 file changed, 3 insertions(+), 12 deletions(-)

diff --git a/util/oslib-posix.c b/util/oslib-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/oslib-posix.c
+++ b/util/oslib-posix.c
@@ -XXX,XX +XXX,XX @@ char *qemu_get_pid_name(pid_t pid)
 
 void *qemu_alloc_stack(size_t *sz)
 {
-    void *ptr, *guardpage;
+    void *ptr;
     int flags;
 #ifdef CONFIG_DEBUG_STACK_USAGE
     void *ptr2;
@@ -XXX,XX +XXX,XX @@ void *qemu_alloc_stack(size_t *sz)
         abort();
     }
 
-#if defined(HOST_IA64)
-    /* separate register stack */
-    guardpage = ptr + (((*sz - pagesz) / 2) & ~pagesz);
-#elif defined(HOST_HPPA)
-    /* stack grows up */
-    guardpage = ptr + *sz - pagesz;
-#else
-    /* stack grows down */
-    guardpage = ptr;
-#endif
-    if (mprotect(guardpage, pagesz, PROT_NONE) != 0) {
+    /* Stack grows down -- guard page at the bottom. */
+    if (mprotect(ptr, pagesz, PROT_NONE) != 0) {
         perror("failed to set up stack guard page");
         abort();
     }
-- 
2.34.1
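
For reference, the allocation pattern that survives the patch above looks
roughly like this self-contained sketch (simplified; the real
qemu_alloc_stack also rounds the requested size and supports stack-usage
debugging):

    #include <stdlib.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Simplified sketch: grows-down stack with a guard page at the bottom. */
    static void *alloc_stack_sketch(size_t *sz)
    {
        size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
        void *ptr = mmap(NULL, *sz + pagesz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ptr == MAP_FAILED) {
            abort();
        }
        /* Stack grows down -- guard page at the bottom. */
        if (mprotect(ptr, pagesz, PROT_NONE) != 0) {
            abort();
        }
        *sz += pagesz;      /* the usable top is at ptr + *sz */
        return ptr;
    }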
From: Akihiko Odaki <akihiko.odaki@daynix.com>

PA-RISC host support was already removed with commit
b1cef6d02f ("Drop remaining bits of ia64 host support").

Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Message-Id: <20230810225922.21600-1-akihiko.odaki@daynix.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 softmmu/async-teardown.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/softmmu/async-teardown.c b/softmmu/async-teardown.c
index XXXXXXX..XXXXXXX 100644
--- a/softmmu/async-teardown.c
+++ b/softmmu/async-teardown.c
@@ -XXX,XX +XXX,XX @@ static void *new_stack_for_clone(void)
 
     /* Allocate a new stack and get a pointer to its top. */
     stack_ptr = qemu_alloc_stack(&stack_size);
-#if !defined(HOST_HPPA)
-    /* The top is at the end of the area, except on HPPA. */
     stack_ptr += stack_size;
-#endif
 
     return stack_ptr;
 }
-- 
2.34.1
From: Jiajie Chen <c@jia.je>

Add opcodes and encoder functions for LSX.

Generated from
https://github.com/jiegec/loongarch-opcodes/tree/qemu-lsx.

Signed-off-by: Jiajie Chen <c@jia.je>
Acked-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-2-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-insn-defs.c.inc | 6019 ++++++++++++++++++++++++++-
 1 file changed, 6018 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-insn-defs.c.inc b/tcg/loongarch64/tcg-insn-defs.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-insn-defs.c.inc
+++ b/tcg/loongarch64/tcg-insn-defs.c.inc
@@ -XXX,XX +XXX,XX @@
  *
  * This file is auto-generated by genqemutcgdefs from
  * https://github.com/loongson-community/loongarch-opcodes,
- * from commit 25ca7effe9d88101c1cf96c4005423643386d81f.
+ * from commit 8027da9a8157a8b47fc48ff1def292e09c5668bd.
  * DO NOT EDIT.
  */
 
@@ -XXX,XX +XXX,XX @@ typedef enum {
     OPC_ANDI = 0x03400000,
     OPC_ORI = 0x03800000,
     OPC_XORI = 0x03c00000,
+    OPC_VFMADD_S = 0x09100000,
+    OPC_VFMADD_D = 0x09200000,
+    OPC_VFMSUB_S = 0x09500000,
+    OPC_VFMSUB_D = 0x09600000,
+    OPC_VFNMADD_S = 0x09900000,
+    OPC_VFNMADD_D = 0x09a00000,
+    OPC_VFNMSUB_S = 0x09d00000,
+    OPC_VFNMSUB_D = 0x09e00000,
+    OPC_VFCMP_CAF_S = 0x0c500000,
+    OPC_VFCMP_SAF_S = 0x0c508000,
+    OPC_VFCMP_CLT_S = 0x0c510000,
+    OPC_VFCMP_SLT_S = 0x0c518000,
+    OPC_VFCMP_CEQ_S = 0x0c520000,
+    OPC_VFCMP_SEQ_S = 0x0c528000,
+    OPC_VFCMP_CLE_S = 0x0c530000,
+    OPC_VFCMP_SLE_S = 0x0c538000,
+    OPC_VFCMP_CUN_S = 0x0c540000,
+    OPC_VFCMP_SUN_S = 0x0c548000,
+    OPC_VFCMP_CULT_S = 0x0c550000,
+    OPC_VFCMP_SULT_S = 0x0c558000,
+    OPC_VFCMP_CUEQ_S = 0x0c560000,
+    OPC_VFCMP_SUEQ_S = 0x0c568000,
+    OPC_VFCMP_CULE_S = 0x0c570000,
+    OPC_VFCMP_SULE_S = 0x0c578000,
+    OPC_VFCMP_CNE_S = 0x0c580000,
+    OPC_VFCMP_SNE_S = 0x0c588000,
+    OPC_VFCMP_COR_S = 0x0c5a0000,
+    OPC_VFCMP_SOR_S = 0x0c5a8000,
+    OPC_VFCMP_CUNE_S = 0x0c5c0000,
+    OPC_VFCMP_SUNE_S = 0x0c5c8000,
+    OPC_VFCMP_CAF_D = 0x0c600000,
+    OPC_VFCMP_SAF_D = 0x0c608000,
+    OPC_VFCMP_CLT_D = 0x0c610000,
+    OPC_VFCMP_SLT_D = 0x0c618000,
+    OPC_VFCMP_CEQ_D = 0x0c620000,
+    OPC_VFCMP_SEQ_D = 0x0c628000,
+    OPC_VFCMP_CLE_D = 0x0c630000,
+    OPC_VFCMP_SLE_D = 0x0c638000,
+    OPC_VFCMP_CUN_D = 0x0c640000,
+    OPC_VFCMP_SUN_D = 0x0c648000,
+    OPC_VFCMP_CULT_D = 0x0c650000,
+    OPC_VFCMP_SULT_D = 0x0c658000,
+    OPC_VFCMP_CUEQ_D = 0x0c660000,
+    OPC_VFCMP_SUEQ_D = 0x0c668000,
+    OPC_VFCMP_CULE_D = 0x0c670000,
+    OPC_VFCMP_SULE_D = 0x0c678000,
+    OPC_VFCMP_CNE_D = 0x0c680000,
+    OPC_VFCMP_SNE_D = 0x0c688000,
+    OPC_VFCMP_COR_D = 0x0c6a0000,
+    OPC_VFCMP_SOR_D = 0x0c6a8000,
+    OPC_VFCMP_CUNE_D = 0x0c6c0000,
+    OPC_VFCMP_SUNE_D = 0x0c6c8000,
+    OPC_VBITSEL_V = 0x0d100000,
+    OPC_VSHUF_B = 0x0d500000,
     OPC_ADDU16I_D = 0x10000000,
     OPC_LU12I_W = 0x14000000,
     OPC_CU32I_D = 0x16000000,
@@ -XXX,XX +XXX,XX @@ typedef enum {
     OPC_LD_BU = 0x2a000000,
     OPC_LD_HU = 0x2a400000,
     OPC_LD_WU = 0x2a800000,
+    OPC_VLD = 0x2c000000,
+    OPC_VST = 0x2c400000,
+    OPC_VLDREPL_D = 0x30100000,
+    OPC_VLDREPL_W = 0x30200000,
+    OPC_VLDREPL_H = 0x30400000,
+    OPC_VLDREPL_B = 0x30800000,
+    OPC_VSTELM_D = 0x31100000,
+    OPC_VSTELM_W = 0x31200000,
+    OPC_VSTELM_H = 0x31400000,
+    OPC_VSTELM_B = 0x31800000,
     OPC_LDX_B = 0x38000000,
     OPC_LDX_H = 0x38040000,
     OPC_LDX_W = 0x38080000,
@@ -XXX,XX +XXX,XX @@ typedef enum {
     OPC_LDX_BU = 0x38200000,
     OPC_LDX_HU = 0x38240000,
     OPC_LDX_WU = 0x38280000,
+    OPC_VLDX = 0x38400000,
+    OPC_VSTX = 0x38440000,
     OPC_DBAR = 0x38720000,
     OPC_JIRL = 0x4c000000,
     OPC_B = 0x50000000,
@@ -XXX,XX +XXX,XX @@ typedef enum {
     OPC_BLE = 0x64000000,
     OPC_BGTU = 0x68000000,
     OPC_BLEU = 0x6c000000,
+    OPC_VSEQ_B = 0x70000000,
+    OPC_VSEQ_H = 0x70008000,
+    OPC_VSEQ_W = 0x70010000,
+    OPC_VSEQ_D = 0x70018000,
+    OPC_VSLE_B = 0x70020000,
+    OPC_VSLE_H = 0x70028000,
+    OPC_VSLE_W = 0x70030000,
+    OPC_VSLE_D = 0x70038000,
+    OPC_VSLE_BU = 0x70040000,
+    OPC_VSLE_HU = 0x70048000,
+    OPC_VSLE_WU = 0x70050000,
+    OPC_VSLE_DU = 0x70058000,
+    OPC_VSLT_B = 0x70060000,
+    OPC_VSLT_H = 0x70068000,
+    OPC_VSLT_W = 0x70070000,
+    OPC_VSLT_D = 0x70078000,
+    OPC_VSLT_BU = 0x70080000,
+    OPC_VSLT_HU = 0x70088000,
+    OPC_VSLT_WU = 0x70090000,
+    OPC_VSLT_DU = 0x70098000,
+    OPC_VADD_B = 0x700a0000,
+    OPC_VADD_H = 0x700a8000,
+    OPC_VADD_W = 0x700b0000,
+    OPC_VADD_D = 0x700b8000,
+    OPC_VSUB_B = 0x700c0000,
+    OPC_VSUB_H = 0x700c8000,
+    OPC_VSUB_W = 0x700d0000,
+    OPC_VSUB_D = 0x700d8000,
+    OPC_VADDWEV_H_B = 0x701e0000,
+    OPC_VADDWEV_W_H = 0x701e8000,
+    OPC_VADDWEV_D_W = 0x701f0000,
+    OPC_VADDWEV_Q_D = 0x701f8000,
+    OPC_VSUBWEV_H_B = 0x70200000,
+    OPC_VSUBWEV_W_H = 0x70208000,
+    OPC_VSUBWEV_D_W = 0x70210000,
+    OPC_VSUBWEV_Q_D = 0x70218000,
+    OPC_VADDWOD_H_B = 0x70220000,
+    OPC_VADDWOD_W_H = 0x70228000,
+    OPC_VADDWOD_D_W = 0x70230000,
+    OPC_VADDWOD_Q_D = 0x70238000,
+    OPC_VSUBWOD_H_B = 0x70240000,
+    OPC_VSUBWOD_W_H = 0x70248000,
+    OPC_VSUBWOD_D_W = 0x70250000,
+    OPC_VSUBWOD_Q_D = 0x70258000,
+    OPC_VADDWEV_H_BU = 0x702e0000,
+    OPC_VADDWEV_W_HU = 0x702e8000,
+    OPC_VADDWEV_D_WU = 0x702f0000,
+    OPC_VADDWEV_Q_DU = 0x702f8000,
+    OPC_VSUBWEV_H_BU = 0x70300000,
+    OPC_VSUBWEV_W_HU = 0x70308000,
+    OPC_VSUBWEV_D_WU = 0x70310000,
+    OPC_VSUBWEV_Q_DU = 0x70318000,
+    OPC_VADDWOD_H_BU = 0x70320000,
+    OPC_VADDWOD_W_HU = 0x70328000,
+    OPC_VADDWOD_D_WU = 0x70330000,
+    OPC_VADDWOD_Q_DU = 0x70338000,
+    OPC_VSUBWOD_H_BU = 0x70340000,
+    OPC_VSUBWOD_W_HU = 0x70348000,
+    OPC_VSUBWOD_D_WU = 0x70350000,
+    OPC_VSUBWOD_Q_DU = 0x70358000,
+    OPC_VADDWEV_H_BU_B = 0x703e0000,
+    OPC_VADDWEV_W_HU_H = 0x703e8000,
+    OPC_VADDWEV_D_WU_W = 0x703f0000,
+    OPC_VADDWEV_Q_DU_D = 0x703f8000,
+    OPC_VADDWOD_H_BU_B = 0x70400000,
+    OPC_VADDWOD_W_HU_H = 0x70408000,
+    OPC_VADDWOD_D_WU_W = 0x70410000,
+    OPC_VADDWOD_Q_DU_D = 0x70418000,
+    OPC_VSADD_B = 0x70460000,
+    OPC_VSADD_H = 0x70468000,
+    OPC_VSADD_W = 0x70470000,
+    OPC_VSADD_D = 0x70478000,
+    OPC_VSSUB_B = 0x70480000,
+    OPC_VSSUB_H = 0x70488000,
+    OPC_VSSUB_W = 0x70490000,
+    OPC_VSSUB_D = 0x70498000,
+    OPC_VSADD_BU = 0x704a0000,
+    OPC_VSADD_HU = 0x704a8000,
+    OPC_VSADD_WU = 0x704b0000,
+    OPC_VSADD_DU = 0x704b8000,
+    OPC_VSSUB_BU = 0x704c0000,
+    OPC_VSSUB_HU = 0x704c8000,
+    OPC_VSSUB_WU = 0x704d0000,
+    OPC_VSSUB_DU = 0x704d8000,
+    OPC_VHADDW_H_B = 0x70540000,
+    OPC_VHADDW_W_H = 0x70548000,
+    OPC_VHADDW_D_W = 0x70550000,
+    OPC_VHADDW_Q_D = 0x70558000,
+    OPC_VHSUBW_H_B = 0x70560000,
+    OPC_VHSUBW_W_H = 0x70568000,
+    OPC_VHSUBW_D_W = 0x70570000,
+    OPC_VHSUBW_Q_D = 0x70578000,
+    OPC_VHADDW_HU_BU = 0x70580000,
+    OPC_VHADDW_WU_HU = 0x70588000,
+    OPC_VHADDW_DU_WU = 0x70590000,
+    OPC_VHADDW_QU_DU = 0x70598000,
+    OPC_VHSUBW_HU_BU = 0x705a0000,
+    OPC_VHSUBW_WU_HU = 0x705a8000,
+    OPC_VHSUBW_DU_WU = 0x705b0000,
+    OPC_VHSUBW_QU_DU = 0x705b8000,
+    OPC_VADDA_B = 0x705c0000,
+    OPC_VADDA_H = 0x705c8000,
+    OPC_VADDA_W = 0x705d0000,
+    OPC_VADDA_D = 0x705d8000,
+    OPC_VABSD_B = 0x70600000,
+    OPC_VABSD_H = 0x70608000,
+    OPC_VABSD_W = 0x70610000,
+    OPC_VABSD_D = 0x70618000,
+    OPC_VABSD_BU = 0x70620000,
+    OPC_VABSD_HU = 0x70628000,
+    OPC_VABSD_WU = 0x70630000,
+    OPC_VABSD_DU = 0x70638000,
+    OPC_VAVG_B = 0x70640000,
+    OPC_VAVG_H = 0x70648000,
+    OPC_VAVG_W = 0x70650000,
+    OPC_VAVG_D = 0x70658000,
+    OPC_VAVG_BU = 0x70660000,
+    OPC_VAVG_HU = 0x70668000,
+    OPC_VAVG_WU = 0x70670000,
+    OPC_VAVG_DU = 0x70678000,
+    OPC_VAVGR_B = 0x70680000,
+    OPC_VAVGR_H = 0x70688000,
+    OPC_VAVGR_W = 0x70690000,
+    OPC_VAVGR_D = 0x70698000,
+    OPC_VAVGR_BU = 0x706a0000,
+    OPC_VAVGR_HU = 0x706a8000,
+    OPC_VAVGR_WU = 0x706b0000,
+    OPC_VAVGR_DU = 0x706b8000,
+    OPC_VMAX_B = 0x70700000,
+    OPC_VMAX_H = 0x70708000,
+    OPC_VMAX_W = 0x70710000,
+    OPC_VMAX_D = 0x70718000,
+    OPC_VMIN_B = 0x70720000,
+    OPC_VMIN_H = 0x70728000,
+    OPC_VMIN_W = 0x70730000,
+    OPC_VMIN_D = 0x70738000,
+    OPC_VMAX_BU = 0x70740000,
+    OPC_VMAX_HU = 0x70748000,
+    OPC_VMAX_WU = 0x70750000,
+    OPC_VMAX_DU = 0x70758000,
+    OPC_VMIN_BU = 0x70760000,
+    OPC_VMIN_HU = 0x70768000,
+    OPC_VMIN_WU = 0x70770000,
+    OPC_VMIN_DU = 0x70778000,
+    OPC_VMUL_B = 0x70840000,
+    OPC_VMUL_H = 0x70848000,
+    OPC_VMUL_W = 0x70850000,
+    OPC_VMUL_D = 0x70858000,
+    OPC_VMUH_B = 0x70860000,
+    OPC_VMUH_H = 0x70868000,
+    OPC_VMUH_W = 0x70870000,
+    OPC_VMUH_D = 0x70878000,
+    OPC_VMUH_BU = 0x70880000,
+    OPC_VMUH_HU = 0x70888000,
+    OPC_VMUH_WU = 0x70890000,
+    OPC_VMUH_DU = 0x70898000,
+    OPC_VMULWEV_H_B = 0x70900000,
+    OPC_VMULWEV_W_H = 0x70908000,
+    OPC_VMULWEV_D_W = 0x70910000,
+    OPC_VMULWEV_Q_D = 0x70918000,
+    OPC_VMULWOD_H_B = 0x70920000,
+    OPC_VMULWOD_W_H = 0x70928000,
+    OPC_VMULWOD_D_W = 0x70930000,
+    OPC_VMULWOD_Q_D = 0x70938000,
+    OPC_VMULWEV_H_BU = 0x70980000,
+    OPC_VMULWEV_W_HU = 0x70988000,
+    OPC_VMULWEV_D_WU = 0x70990000,
+    OPC_VMULWEV_Q_DU = 0x70998000,
+    OPC_VMULWOD_H_BU = 0x709a0000,
+    OPC_VMULWOD_W_HU = 0x709a8000,
+    OPC_VMULWOD_D_WU = 0x709b0000,
+    OPC_VMULWOD_Q_DU = 0x709b8000,
+    OPC_VMULWEV_H_BU_B = 0x70a00000,
+    OPC_VMULWEV_W_HU_H = 0x70a08000,
+    OPC_VMULWEV_D_WU_W = 0x70a10000,
+    OPC_VMULWEV_Q_DU_D = 0x70a18000,
+    OPC_VMULWOD_H_BU_B = 0x70a20000,
+    OPC_VMULWOD_W_HU_H = 0x70a28000,
+    OPC_VMULWOD_D_WU_W = 0x70a30000,
+    OPC_VMULWOD_Q_DU_D = 0x70a38000,
+    OPC_VMADD_B = 0x70a80000,
+    OPC_VMADD_H = 0x70a88000,
+    OPC_VMADD_W = 0x70a90000,
+    OPC_VMADD_D = 0x70a98000,
+    OPC_VMSUB_B = 0x70aa0000,
+    OPC_VMSUB_H = 0x70aa8000,
+    OPC_VMSUB_W = 0x70ab0000,
+    OPC_VMSUB_D = 0x70ab8000,
+    OPC_VMADDWEV_H_B = 0x70ac0000,
+    OPC_VMADDWEV_W_H = 0x70ac8000,
+    OPC_VMADDWEV_D_W = 0x70ad0000,
+    OPC_VMADDWEV_Q_D = 0x70ad8000,
+    OPC_VMADDWOD_H_B = 0x70ae0000,
+    OPC_VMADDWOD_W_H = 0x70ae8000,
+    OPC_VMADDWOD_D_W = 0x70af0000,
+    OPC_VMADDWOD_Q_D = 0x70af8000,
+    OPC_VMADDWEV_H_BU = 0x70b40000,
+    OPC_VMADDWEV_W_HU = 0x70b48000,
+    OPC_VMADDWEV_D_WU = 0x70b50000,
+    OPC_VMADDWEV_Q_DU = 0x70b58000,
+    OPC_VMADDWOD_H_BU = 0x70b60000,
+    OPC_VMADDWOD_W_HU = 0x70b68000,
+    OPC_VMADDWOD_D_WU = 0x70b70000,
+    OPC_VMADDWOD_Q_DU = 0x70b78000,
+    OPC_VMADDWEV_H_BU_B = 0x70bc0000,
+    OPC_VMADDWEV_W_HU_H = 0x70bc8000,
+    OPC_VMADDWEV_D_WU_W = 0x70bd0000,
+    OPC_VMADDWEV_Q_DU_D = 0x70bd8000,
+    OPC_VMADDWOD_H_BU_B = 0x70be0000,
+    OPC_VMADDWOD_W_HU_H = 0x70be8000,
+    OPC_VMADDWOD_D_WU_W = 0x70bf0000,
+    OPC_VMADDWOD_Q_DU_D = 0x70bf8000,
+    OPC_VDIV_B = 0x70e00000,
+    OPC_VDIV_H = 0x70e08000,
+    OPC_VDIV_W = 0x70e10000,
+    OPC_VDIV_D = 0x70e18000,
+    OPC_VMOD_B = 0x70e20000,
+    OPC_VMOD_H = 0x70e28000,
+    OPC_VMOD_W = 0x70e30000,
+    OPC_VMOD_D = 0x70e38000,
+    OPC_VDIV_BU = 0x70e40000,
+    OPC_VDIV_HU = 0x70e48000,
+    OPC_VDIV_WU = 0x70e50000,
+    OPC_VDIV_DU = 0x70e58000,
+    OPC_VMOD_BU = 0x70e60000,
+    OPC_VMOD_HU = 0x70e68000,
+    OPC_VMOD_WU = 0x70e70000,
+    OPC_VMOD_DU = 0x70e78000,
+    OPC_VSLL_B = 0x70e80000,
+    OPC_VSLL_H = 0x70e88000,
+    OPC_VSLL_W = 0x70e90000,
+    OPC_VSLL_D = 0x70e98000,
+    OPC_VSRL_B = 0x70ea0000,
+    OPC_VSRL_H = 0x70ea8000,
+    OPC_VSRL_W = 0x70eb0000,
+    OPC_VSRL_D = 0x70eb8000,
+    OPC_VSRA_B = 0x70ec0000,
+    OPC_VSRA_H = 0x70ec8000,
+    OPC_VSRA_W = 0x70ed0000,
+    OPC_VSRA_D = 0x70ed8000,
+    OPC_VROTR_B = 0x70ee0000,
+    OPC_VROTR_H = 0x70ee8000,
+    OPC_VROTR_W = 0x70ef0000,
+    OPC_VROTR_D = 0x70ef8000,
+    OPC_VSRLR_B = 0x70f00000,
+    OPC_VSRLR_H = 0x70f08000,
+    OPC_VSRLR_W = 0x70f10000,
+    OPC_VSRLR_D = 0x70f18000,
+    OPC_VSRAR_B = 0x70f20000,
+    OPC_VSRAR_H = 0x70f28000,
+    OPC_VSRAR_W = 0x70f30000,
+    OPC_VSRAR_D = 0x70f38000,
+    OPC_VSRLN_B_H = 0x70f48000,
+    OPC_VSRLN_H_W = 0x70f50000,
+    OPC_VSRLN_W_D = 0x70f58000,
+    OPC_VSRAN_B_H = 0x70f68000,
+    OPC_VSRAN_H_W = 0x70f70000,
+    OPC_VSRAN_W_D = 0x70f78000,
+    OPC_VSRLRN_B_H = 0x70f88000,
+    OPC_VSRLRN_H_W = 0x70f90000,
+    OPC_VSRLRN_W_D = 0x70f98000,
+    OPC_VSRARN_B_H = 0x70fa8000,
+    OPC_VSRARN_H_W = 0x70fb0000,
+    OPC_VSRARN_W_D = 0x70fb8000,
+    OPC_VSSRLN_B_H = 0x70fc8000,
+    OPC_VSSRLN_H_W = 0x70fd0000,
+    OPC_VSSRLN_W_D = 0x70fd8000,
+    OPC_VSSRAN_B_H = 0x70fe8000,
+    OPC_VSSRAN_H_W = 0x70ff0000,
+    OPC_VSSRAN_W_D = 0x70ff8000,
+    OPC_VSSRLRN_B_H = 0x71008000,
+    OPC_VSSRLRN_H_W = 0x71010000,
+    OPC_VSSRLRN_W_D = 0x71018000,
+    OPC_VSSRARN_B_H = 0x71028000,
+    OPC_VSSRARN_H_W = 0x71030000,
+    OPC_VSSRARN_W_D = 0x71038000,
+    OPC_VSSRLN_BU_H = 0x71048000,
+    OPC_VSSRLN_HU_W = 0x71050000,
+    OPC_VSSRLN_WU_D = 0x71058000,
+    OPC_VSSRAN_BU_H = 0x71068000,
+    OPC_VSSRAN_HU_W = 0x71070000,
+    OPC_VSSRAN_WU_D = 0x71078000,
+    OPC_VSSRLRN_BU_H = 0x71088000,
+    OPC_VSSRLRN_HU_W = 0x71090000,
+    OPC_VSSRLRN_WU_D = 0x71098000,
+    OPC_VSSRARN_BU_H = 0x710a8000,
+    OPC_VSSRARN_HU_W = 0x710b0000,
+    OPC_VSSRARN_WU_D = 0x710b8000,
+    OPC_VBITCLR_B = 0x710c0000,
+    OPC_VBITCLR_H = 0x710c8000,
+    OPC_VBITCLR_W = 0x710d0000,
+    OPC_VBITCLR_D = 0x710d8000,
+    OPC_VBITSET_B = 0x710e0000,
+    OPC_VBITSET_H = 0x710e8000,
+    OPC_VBITSET_W = 0x710f0000,
+    OPC_VBITSET_D = 0x710f8000,
+    OPC_VBITREV_B = 0x71100000,
+    OPC_VBITREV_H = 0x71108000,
+    OPC_VBITREV_W = 0x71110000,
+    OPC_VBITREV_D = 0x71118000,
+    OPC_VPACKEV_B = 0x71160000,
+    OPC_VPACKEV_H = 0x71168000,
+    OPC_VPACKEV_W = 0x71170000,
+    OPC_VPACKEV_D = 0x71178000,
+    OPC_VPACKOD_B = 0x71180000,
+    OPC_VPACKOD_H = 0x71188000,
+    OPC_VPACKOD_W = 0x71190000,
+    OPC_VPACKOD_D = 0x71198000,
+    OPC_VILVL_B = 0x711a0000,
+    OPC_VILVL_H = 0x711a8000,
+    OPC_VILVL_W = 0x711b0000,
+    OPC_VILVL_D = 0x711b8000,
+    OPC_VILVH_B = 0x711c0000,
+    OPC_VILVH_H = 0x711c8000,
+    OPC_VILVH_W = 0x711d0000,
+    OPC_VILVH_D = 0x711d8000,
+    OPC_VPICKEV_B = 0x711e0000,
+    OPC_VPICKEV_H = 0x711e8000,
+    OPC_VPICKEV_W = 0x711f0000,
+    OPC_VPICKEV_D = 0x711f8000,
+    OPC_VPICKOD_B = 0x71200000,
+    OPC_VPICKOD_H = 0x71208000,
+    OPC_VPICKOD_W = 0x71210000,
+    OPC_VPICKOD_D = 0x71218000,
+    OPC_VREPLVE_B = 0x71220000,
+    OPC_VREPLVE_H = 0x71228000,
+    OPC_VREPLVE_W = 0x71230000,
+    OPC_VREPLVE_D = 0x71238000,
+    OPC_VAND_V = 0x71260000,
+    OPC_VOR_V = 0x71268000,
+    OPC_VXOR_V = 0x71270000,
+    OPC_VNOR_V = 0x71278000,
+    OPC_VANDN_V = 0x71280000,
+    OPC_VORN_V = 0x71288000,
+    OPC_VFRSTP_B = 0x712b0000,
+    OPC_VFRSTP_H = 0x712b8000,
+    OPC_VADD_Q = 0x712d0000,
+    OPC_VSUB_Q = 0x712d8000,
+    OPC_VSIGNCOV_B = 0x712e0000,
+    OPC_VSIGNCOV_H = 0x712e8000,
+    OPC_VSIGNCOV_W = 0x712f0000,
+    OPC_VSIGNCOV_D = 0x712f8000,
+    OPC_VFADD_S = 0x71308000,
+    OPC_VFADD_D = 0x71310000,
+    OPC_VFSUB_S = 0x71328000,
+    OPC_VFSUB_D = 0x71330000,
+    OPC_VFMUL_S = 0x71388000,
+    OPC_VFMUL_D = 0x71390000,
+    OPC_VFDIV_S = 0x713a8000,
+    OPC_VFDIV_D = 0x713b0000,
+    OPC_VFMAX_S = 0x713c8000,
+    OPC_VFMAX_D = 0x713d0000,
+    OPC_VFMIN_S = 0x713e8000,
+    OPC_VFMIN_D = 0x713f0000,
+    OPC_VFMAXA_S = 0x71408000,
+    OPC_VFMAXA_D = 0x71410000,
+    OPC_VFMINA_S = 0x71428000,
+    OPC_VFMINA_D = 0x71430000,
+    OPC_VFCVT_H_S = 0x71460000,
+    OPC_VFCVT_S_D = 0x71468000,
+    OPC_VFFINT_S_L = 0x71480000,
+    OPC_VFTINT_W_D = 0x71498000,
+    OPC_VFTINTRM_W_D = 0x714a0000,
+    OPC_VFTINTRP_W_D = 0x714a8000,
+    OPC_VFTINTRZ_W_D = 0x714b0000,
+    OPC_VFTINTRNE_W_D = 0x714b8000,
+    OPC_VSHUF_H = 0x717a8000,
+    OPC_VSHUF_W = 0x717b0000,
+    OPC_VSHUF_D = 0x717b8000,
+    OPC_VSEQI_B = 0x72800000,
+    OPC_VSEQI_H = 0x72808000,
+    OPC_VSEQI_W = 0x72810000,
+    OPC_VSEQI_D = 0x72818000,
+    OPC_VSLEI_B = 0x72820000,
+    OPC_VSLEI_H = 0x72828000,
+    OPC_VSLEI_W = 0x72830000,
+    OPC_VSLEI_D = 0x72838000,
+    OPC_VSLEI_BU = 0x72840000,
+    OPC_VSLEI_HU = 0x72848000,
+    OPC_VSLEI_WU = 0x72850000,
+    OPC_VSLEI_DU = 0x72858000,
+    OPC_VSLTI_B = 0x72860000,
+    OPC_VSLTI_H = 0x72868000,
+    OPC_VSLTI_W = 0x72870000,
+    OPC_VSLTI_D = 0x72878000,
+    OPC_VSLTI_BU = 0x72880000,
+    OPC_VSLTI_HU = 0x72888000,
+    OPC_VSLTI_WU = 0x72890000,
+    OPC_VSLTI_DU = 0x72898000,
+    OPC_VADDI_BU = 0x728a0000,
+    OPC_VADDI_HU = 0x728a8000,
+    OPC_VADDI_WU = 0x728b0000,
+    OPC_VADDI_DU = 0x728b8000,
+    OPC_VSUBI_BU = 0x728c0000,
+    OPC_VSUBI_HU = 0x728c8000,
+    OPC_VSUBI_WU = 0x728d0000,
+    OPC_VSUBI_DU = 0x728d8000,
+    OPC_VBSLL_V = 0x728e0000,
+    OPC_VBSRL_V = 0x728e8000,
+    OPC_VMAXI_B = 0x72900000,
+    OPC_VMAXI_H = 0x72908000,
+    OPC_VMAXI_W = 0x72910000,
+    OPC_VMAXI_D = 0x72918000,
+    OPC_VMINI_B = 0x72920000,
+    OPC_VMINI_H = 0x72928000,
+    OPC_VMINI_W = 0x72930000,
+    OPC_VMINI_D = 0x72938000,
+    OPC_VMAXI_BU = 0x72940000,
+    OPC_VMAXI_HU = 0x72948000,
+    OPC_VMAXI_WU = 0x72950000,
+    OPC_VMAXI_DU = 0x72958000,
+    OPC_VMINI_BU = 0x72960000,
+    OPC_VMINI_HU = 0x72968000,
+    OPC_VMINI_WU = 0x72970000,
+    OPC_VMINI_DU = 0x72978000,
+    OPC_VFRSTPI_B = 0x729a0000,
+    OPC_VFRSTPI_H = 0x729a8000,
+    OPC_VCLO_B = 0x729c0000,
+    OPC_VCLO_H = 0x729c0400,
+    OPC_VCLO_W = 0x729c0800,
+    OPC_VCLO_D = 0x729c0c00,
+    OPC_VCLZ_B = 0x729c1000,
+    OPC_VCLZ_H = 0x729c1400,
+    OPC_VCLZ_W = 0x729c1800,
+    OPC_VCLZ_D = 0x729c1c00,
+    OPC_VPCNT_B = 0x729c2000,
+    OPC_VPCNT_H = 0x729c2400,
+    OPC_VPCNT_W = 0x729c2800,
+    OPC_VPCNT_D = 0x729c2c00,
+    OPC_VNEG_B = 0x729c3000,
+    OPC_VNEG_H = 0x729c3400,
+    OPC_VNEG_W = 0x729c3800,
+    OPC_VNEG_D = 0x729c3c00,
+    OPC_VMSKLTZ_B = 0x729c4000,
+    OPC_VMSKLTZ_H = 0x729c4400,
+    OPC_VMSKLTZ_W = 0x729c4800,
+    OPC_VMSKLTZ_D = 0x729c4c00,
+    OPC_VMSKGEZ_B = 0x729c5000,
+    OPC_VMSKNZ_B = 0x729c6000,
+    OPC_VSETEQZ_V = 0x729c9800,
+    OPC_VSETNEZ_V = 0x729c9c00,
+    OPC_VSETANYEQZ_B = 0x729ca000,
+    OPC_VSETANYEQZ_H = 0x729ca400,
+    OPC_VSETANYEQZ_W = 0x729ca800,
+    OPC_VSETANYEQZ_D = 0x729cac00,
+    OPC_VSETALLNEZ_B = 0x729cb000,
+    OPC_VSETALLNEZ_H = 0x729cb400,
+    OPC_VSETALLNEZ_W = 0x729cb800,
+    OPC_VSETALLNEZ_D = 0x729cbc00,
+    OPC_VFLOGB_S = 0x729cc400,
+    OPC_VFLOGB_D = 0x729cc800,
+    OPC_VFCLASS_S = 0x729cd400,
+    OPC_VFCLASS_D = 0x729cd800,
+    OPC_VFSQRT_S = 0x729ce400,
+    OPC_VFSQRT_D = 0x729ce800,
+    OPC_VFRECIP_S = 0x729cf400,
+    OPC_VFRECIP_D = 0x729cf800,
+    OPC_VFRSQRT_S = 0x729d0400,
+    OPC_VFRSQRT_D = 0x729d0800,
+    OPC_VFRINT_S = 0x729d3400,
+    OPC_VFRINT_D = 0x729d3800,
+    OPC_VFRINTRM_S = 0x729d4400,
+    OPC_VFRINTRM_D = 0x729d4800,
+    OPC_VFRINTRP_S = 0x729d5400,
+    OPC_VFRINTRP_D = 0x729d5800,
+    OPC_VFRINTRZ_S = 0x729d6400,
+    OPC_VFRINTRZ_D = 0x729d6800,
+    OPC_VFRINTRNE_S = 0x729d7400,
+    OPC_VFRINTRNE_D = 0x729d7800,
+    OPC_VFCVTL_S_H = 0x729de800,
+    OPC_VFCVTH_S_H = 0x729dec00,
+    OPC_VFCVTL_D_S = 0x729df000,
+    OPC_VFCVTH_D_S = 0x729df400,
+    OPC_VFFINT_S_W = 0x729e0000,
+    OPC_VFFINT_S_WU = 0x729e0400,
+    OPC_VFFINT_D_L = 0x729e0800,
+    OPC_VFFINT_D_LU = 0x729e0c00,
+    OPC_VFFINTL_D_W = 0x729e1000,
+    OPC_VFFINTH_D_W = 0x729e1400,
+    OPC_VFTINT_W_S = 0x729e3000,
+    OPC_VFTINT_L_D = 0x729e3400,
+    OPC_VFTINTRM_W_S = 0x729e3800,
+    OPC_VFTINTRM_L_D = 0x729e3c00,
+    OPC_VFTINTRP_W_S = 0x729e4000,
+    OPC_VFTINTRP_L_D = 0x729e4400,
+    OPC_VFTINTRZ_W_S = 0x729e4800,
+    OPC_VFTINTRZ_L_D = 0x729e4c00,
+    OPC_VFTINTRNE_W_S = 0x729e5000,
+    OPC_VFTINTRNE_L_D = 0x729e5400,
+    OPC_VFTINT_WU_S = 0x729e5800,
+    OPC_VFTINT_LU_D = 0x729e5c00,
+    OPC_VFTINTRZ_WU_S = 0x729e7000,
+    OPC_VFTINTRZ_LU_D = 0x729e7400,
+    OPC_VFTINTL_L_S = 0x729e8000,
+    OPC_VFTINTH_L_S = 0x729e8400,
+    OPC_VFTINTRML_L_S = 0x729e8800,
+    OPC_VFTINTRMH_L_S = 0x729e8c00,
+    OPC_VFTINTRPL_L_S = 0x729e9000,
+    OPC_VFTINTRPH_L_S = 0x729e9400,
+    OPC_VFTINTRZL_L_S = 0x729e9800,
+    OPC_VFTINTRZH_L_S = 0x729e9c00,
+    OPC_VFTINTRNEL_L_S = 0x729ea000,
+    OPC_VFTINTRNEH_L_S = 0x729ea400,
+    OPC_VEXTH_H_B = 0x729ee000,
+    OPC_VEXTH_W_H = 0x729ee400,
+    OPC_VEXTH_D_W = 0x729ee800,
+    OPC_VEXTH_Q_D = 0x729eec00,
+    OPC_VEXTH_HU_BU = 0x729ef000,
+    OPC_VEXTH_WU_HU = 0x729ef400,
+    OPC_VEXTH_DU_WU = 0x729ef800,
+    OPC_VEXTH_QU_DU = 0x729efc00,
+    OPC_VREPLGR2VR_B = 0x729f0000,
+    OPC_VREPLGR2VR_H = 0x729f0400,
+    OPC_VREPLGR2VR_W = 0x729f0800,
+    OPC_VREPLGR2VR_D = 0x729f0c00,
+    OPC_VROTRI_B = 0x72a02000,
+    OPC_VROTRI_H = 0x72a04000,
+    OPC_VROTRI_W = 0x72a08000,
+    OPC_VROTRI_D = 0x72a10000,
+    OPC_VSRLRI_B = 0x72a42000,
+    OPC_VSRLRI_H = 0x72a44000,
+    OPC_VSRLRI_W = 0x72a48000,
+    OPC_VSRLRI_D = 0x72a50000,
+    OPC_VSRARI_B = 0x72a82000,
+    OPC_VSRARI_H = 0x72a84000,
+    OPC_VSRARI_W = 0x72a88000,
+    OPC_VSRARI_D = 0x72a90000,
+    OPC_VINSGR2VR_B = 0x72eb8000,
+    OPC_VINSGR2VR_H = 0x72ebc000,
+    OPC_VINSGR2VR_W = 0x72ebe000,
+    OPC_VINSGR2VR_D = 0x72ebf000,
+    OPC_VPICKVE2GR_B = 0x72ef8000,
+    OPC_VPICKVE2GR_H = 0x72efc000,
+    OPC_VPICKVE2GR_W = 0x72efe000,
+    OPC_VPICKVE2GR_D = 0x72eff000,
+    OPC_VPICKVE2GR_BU = 0x72f38000,
+    OPC_VPICKVE2GR_HU = 0x72f3c000,
+    OPC_VPICKVE2GR_WU = 0x72f3e000,
+    OPC_VPICKVE2GR_DU = 0x72f3f000,
+    OPC_VREPLVEI_B = 0x72f78000,
+    OPC_VREPLVEI_H = 0x72f7c000,
+    OPC_VREPLVEI_W = 0x72f7e000,
+    OPC_VREPLVEI_D = 0x72f7f000,
+    OPC_VSLLWIL_H_B = 0x73082000,
+    OPC_VSLLWIL_W_H = 0x73084000,
+    OPC_VSLLWIL_D_W = 0x73088000,
+    OPC_VEXTL_Q_D = 0x73090000,
+    OPC_VSLLWIL_HU_BU = 0x730c2000,
+    OPC_VSLLWIL_WU_HU = 0x730c4000,
+    OPC_VSLLWIL_DU_WU = 0x730c8000,
+    OPC_VEXTL_QU_DU = 0x730d0000,
+    OPC_VBITCLRI_B = 0x73102000,
+    OPC_VBITCLRI_H = 0x73104000,
+    OPC_VBITCLRI_W = 0x73108000,
+    OPC_VBITCLRI_D = 0x73110000,
+    OPC_VBITSETI_B = 0x73142000,
+    OPC_VBITSETI_H = 0x73144000,
+    OPC_VBITSETI_W = 0x73148000,
+    OPC_VBITSETI_D = 0x73150000,
+    OPC_VBITREVI_B = 0x73182000,
+    OPC_VBITREVI_H = 0x73184000,
+    OPC_VBITREVI_W = 0x73188000,
+    OPC_VBITREVI_D = 0x73190000,
+    OPC_VSAT_B = 0x73242000,
+    OPC_VSAT_H = 0x73244000,
+    OPC_VSAT_W = 0x73248000,
+    OPC_VSAT_D = 0x73250000,
+    OPC_VSAT_BU = 0x73282000,
+    OPC_VSAT_HU = 0x73284000,
+    OPC_VSAT_WU = 0x73288000,
+    OPC_VSAT_DU = 0x73290000,
+    OPC_VSLLI_B = 0x732c2000,
+    OPC_VSLLI_H = 0x732c4000,
+    OPC_VSLLI_W = 0x732c8000,
+    OPC_VSLLI_D = 0x732d0000,
+    OPC_VSRLI_B = 0x73302000,
+    OPC_VSRLI_H = 0x73304000,
+    OPC_VSRLI_W = 0x73308000,
+    OPC_VSRLI_D = 0x73310000,
+    OPC_VSRAI_B = 0x73342000,
+    OPC_VSRAI_H = 0x73344000,
+    OPC_VSRAI_W = 0x73348000,
+    OPC_VSRAI_D = 0x73350000,
+    OPC_VSRLNI_B_H = 0x73404000,
+    OPC_VSRLNI_H_W = 0x73408000,
+    OPC_VSRLNI_W_D = 0x73410000,
+    OPC_VSRLNI_D_Q = 0x73420000,
+    OPC_VSRLRNI_B_H = 0x73444000,
+    OPC_VSRLRNI_H_W = 0x73448000,
+    OPC_VSRLRNI_W_D = 0x73450000,
+    OPC_VSRLRNI_D_Q = 0x73460000,
+    OPC_VSSRLNI_B_H = 0x73484000,
+    OPC_VSSRLNI_H_W = 0x73488000,
+    OPC_VSSRLNI_W_D = 0x73490000,
+    OPC_VSSRLNI_D_Q = 0x734a0000,
+    OPC_VSSRLNI_BU_H = 0x734c4000,
+    OPC_VSSRLNI_HU_W = 0x734c8000,
+    OPC_VSSRLNI_WU_D = 0x734d0000,
+    OPC_VSSRLNI_DU_Q = 0x734e0000,
+    OPC_VSSRLRNI_B_H = 0x73504000,
+    OPC_VSSRLRNI_H_W = 0x73508000,
+    OPC_VSSRLRNI_W_D = 0x73510000,
+    OPC_VSSRLRNI_D_Q = 0x73520000,
+    OPC_VSSRLRNI_BU_H = 0x73544000,
+    OPC_VSSRLRNI_HU_W = 0x73548000,
+    OPC_VSSRLRNI_WU_D = 0x73550000,
+    OPC_VSSRLRNI_DU_Q = 0x73560000,
+    OPC_VSRANI_B_H = 0x73584000,
728
+ OPC_VSRANI_H_W = 0x73588000,
729
+ OPC_VSRANI_W_D = 0x73590000,
730
+ OPC_VSRANI_D_Q = 0x735a0000,
731
+ OPC_VSRARNI_B_H = 0x735c4000,
732
+ OPC_VSRARNI_H_W = 0x735c8000,
733
+ OPC_VSRARNI_W_D = 0x735d0000,
734
+ OPC_VSRARNI_D_Q = 0x735e0000,
735
+ OPC_VSSRANI_B_H = 0x73604000,
736
+ OPC_VSSRANI_H_W = 0x73608000,
737
+ OPC_VSSRANI_W_D = 0x73610000,
738
+ OPC_VSSRANI_D_Q = 0x73620000,
739
+ OPC_VSSRANI_BU_H = 0x73644000,
740
+ OPC_VSSRANI_HU_W = 0x73648000,
741
+ OPC_VSSRANI_WU_D = 0x73650000,
742
+ OPC_VSSRANI_DU_Q = 0x73660000,
743
+ OPC_VSSRARNI_B_H = 0x73684000,
744
+ OPC_VSSRARNI_H_W = 0x73688000,
745
+ OPC_VSSRARNI_W_D = 0x73690000,
746
+ OPC_VSSRARNI_D_Q = 0x736a0000,
747
+ OPC_VSSRARNI_BU_H = 0x736c4000,
748
+ OPC_VSSRARNI_HU_W = 0x736c8000,
749
+ OPC_VSSRARNI_WU_D = 0x736d0000,
750
+ OPC_VSSRARNI_DU_Q = 0x736e0000,
751
+ OPC_VEXTRINS_D = 0x73800000,
752
+ OPC_VEXTRINS_W = 0x73840000,
753
+ OPC_VEXTRINS_H = 0x73880000,
754
+ OPC_VEXTRINS_B = 0x738c0000,
755
+ OPC_VSHUF4I_B = 0x73900000,
756
+ OPC_VSHUF4I_H = 0x73940000,
757
+ OPC_VSHUF4I_W = 0x73980000,
758
+ OPC_VSHUF4I_D = 0x739c0000,
759
+ OPC_VBITSELI_B = 0x73c40000,
760
+ OPC_VANDI_B = 0x73d00000,
761
+ OPC_VORI_B = 0x73d40000,
762
+ OPC_VXORI_B = 0x73d80000,
763
+ OPC_VNORI_B = 0x73dc0000,
764
+ OPC_VLDI = 0x73e00000,
765
+ OPC_VPERMI_W = 0x73e40000,
766
} LoongArchInsn;
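
Each opcode value above is a 32-bit instruction template with its operand fields zeroed; the encode_*_slots helpers that follow simply OR register numbers and immediates into the low bits. A worked example under that reading (values hypothetical; slot positions as in encode_djk_slots below):

    /* vmini.b v0, v1, -1 -- vd in bits [4:0], vj in [9:5], si5 in [14:10]. */
    uint32_t insn = 0x72920000               /* OPC_VMINI_B template */
                  | (0 & 0x1f)               /* vd = v0 */
                  | ((1 & 0x1f) << 5)        /* vj = v1 */
                  | ((-1 & 0x1f) << 10);     /* si5 = -1 -> 0x1f */
    /* insn == 0x72927c20 */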

 static int32_t __attribute__((unused))
@@ -XXX,XX +XXX,XX @@ encode_djk_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k)
     return opc | d | j << 5 | k << 10;
 }

+static int32_t __attribute__((unused))
+encode_djka_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
+                  uint32_t a)
+{
+    return opc | d | j << 5 | k << 10 | a << 15;
+}
+
 static int32_t __attribute__((unused))
 encode_djkm_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
                   uint32_t m)
@@ -XXX,XX +XXX,XX @@ encode_djkm_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
     return opc | d | j << 5 | k << 10 | m << 16;
 }

+static int32_t __attribute__((unused))
+encode_djkn_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
+                  uint32_t n)
+{
+    return opc | d | j << 5 | k << 10 | n << 18;
+}
+
 static int32_t __attribute__((unused))
 encode_dk_slots(LoongArchInsn opc, uint32_t d, uint32_t k)
 {
     return opc | d | k << 10;
 }

+static int32_t __attribute__((unused))
+encode_cdvj_insn(LoongArchInsn opc, TCGReg cd, TCGReg vj)
+{
+    tcg_debug_assert(cd >= 0 && cd <= 0x7);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    return encode_dj_slots(opc, cd, vj & 0x1f);
+}
+
 static int32_t __attribute__((unused))
 encode_dj_insn(LoongArchInsn opc, TCGReg d, TCGReg j)
 {
@@ -XXX,XX +XXX,XX @@ encode_dsj20_insn(LoongArchInsn opc, TCGReg d, int32_t sj20)
     return encode_dj_slots(opc, d, sj20 & 0xfffff);
 }

+static int32_t __attribute__((unused))
+encode_dvjuk1_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk1)
+{
+    tcg_debug_assert(d >= 0 && d <= 0x1f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk1 <= 0x1);
+    return encode_djk_slots(opc, d, vj & 0x1f, uk1);
+}
+
+static int32_t __attribute__((unused))
+encode_dvjuk2_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk2)
+{
+    tcg_debug_assert(d >= 0 && d <= 0x1f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk2 <= 0x3);
+    return encode_djk_slots(opc, d, vj & 0x1f, uk2);
+}
+
+static int32_t __attribute__((unused))
+encode_dvjuk3_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk3)
+{
+    tcg_debug_assert(d >= 0 && d <= 0x1f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk3 <= 0x7);
+    return encode_djk_slots(opc, d, vj & 0x1f, uk3);
+}
+
+static int32_t __attribute__((unused))
+encode_dvjuk4_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk4)
+{
+    tcg_debug_assert(d >= 0 && d <= 0x1f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk4 <= 0xf);
+    return encode_djk_slots(opc, d, vj & 0x1f, uk4);
+}
+
 static int32_t __attribute__((unused))
 encode_sd10k16_insn(LoongArchInsn opc, int32_t sd10k16)
 {
@@ -XXX,XX +XXX,XX @@ encode_ud15_insn(LoongArchInsn opc, uint32_t ud15)
     return encode_d_slot(opc, ud15);
 }

+static int32_t __attribute__((unused))
+encode_vdj_insn(LoongArchInsn opc, TCGReg vd, TCGReg j)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    return encode_dj_slots(opc, vd & 0x1f, j);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjk_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, TCGReg k)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(k >= 0 && k <= 0x1f);
+    return encode_djk_slots(opc, vd & 0x1f, j, k);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk10_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk10)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk10 >= -0x200 && sk10 <= 0x1ff);
+    return encode_djk_slots(opc, vd & 0x1f, j, sk10 & 0x3ff);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk11_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk11)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk11 >= -0x400 && sk11 <= 0x3ff);
+    return encode_djk_slots(opc, vd & 0x1f, j, sk11 & 0x7ff);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk12_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk12)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk12 >= -0x800 && sk12 <= 0x7ff);
+    return encode_djk_slots(opc, vd & 0x1f, j, sk12 & 0xfff);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk8un1_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
+                      uint32_t un1)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
+    tcg_debug_assert(un1 <= 0x1);
+    return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un1);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk8un2_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
+                      uint32_t un2)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
+    tcg_debug_assert(un2 <= 0x3);
+    return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un2);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk8un3_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
+                      uint32_t un3)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
+    tcg_debug_assert(un3 <= 0x7);
+    return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un3);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk8un4_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
+                      uint32_t un4)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
+    tcg_debug_assert(un4 <= 0xf);
+    return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un4);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk9_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk9)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk9 >= -0x100 && sk9 <= 0xff);
+    return encode_djk_slots(opc, vd & 0x1f, j, sk9 & 0x1ff);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjuk1_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk1)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(uk1 <= 0x1);
+    return encode_djk_slots(opc, vd & 0x1f, j, uk1);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjuk2_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk2)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(uk2 <= 0x3);
+    return encode_djk_slots(opc, vd & 0x1f, j, uk2);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjuk3_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk3)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(uk3 <= 0x7);
+    return encode_djk_slots(opc, vd & 0x1f, j, uk3);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjuk4_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk4)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(uk4 <= 0xf);
+    return encode_djk_slots(opc, vd & 0x1f, j, uk4);
+}
+
+static int32_t __attribute__((unused))
+encode_vdsj13_insn(LoongArchInsn opc, TCGReg vd, int32_t sj13)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(sj13 >= -0x1000 && sj13 <= 0xfff);
+    return encode_dj_slots(opc, vd & 0x1f, sj13 & 0x1fff);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvj_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    return encode_dj_slots(opc, vd & 0x1f, vj & 0x1f);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjk_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, TCGReg k)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(k >= 0 && k <= 0x1f);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, k);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjsk5_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(sk5 >= -0x10 && sk5 <= 0xf);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, sk5 & 0x1f);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk1_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk1)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk1 <= 0x1);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk1);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk2_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk2)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk2 <= 0x3);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk2);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk3_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk3 <= 0x7);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk3);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk4_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk4 <= 0xf);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk4);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk5_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk5 <= 0x1f);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk5);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk6_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk6 <= 0x3f);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk6);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk7_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk7 <= 0x7f);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk7);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk8_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk8 <= 0xff);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk8);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjvk_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(vk >= 0x20 && vk <= 0x3f);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, vk & 0x1f);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjvkva_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, TCGReg vk,
+                     TCGReg va)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(vk >= 0x20 && vk <= 0x3f);
+    tcg_debug_assert(va >= 0x20 && va <= 0x3f);
+    return encode_djka_slots(opc, vd & 0x1f, vj & 0x1f, vk & 0x1f, va & 0x1f);
+}
+
/* Emits the `clz.w d, j` instruction. */
1118
static void __attribute__((unused))
1119
tcg_out_opc_clz_w(TCGContext *s, TCGReg d, TCGReg j)
1120
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_xori(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12)
1121
tcg_out32(s, encode_djuk12_insn(OPC_XORI, d, j, uk12));
218
}
1122
}
219
1123
220
void restore_state_to_opc(CPUAVRState *env, TranslationBlock *tb,
1124
+/* Emits the `vfmadd.s vd, vj, vk, va` instruction. */
221
diff --git a/target/cris/translate.c b/target/cris/translate.c
1125
+static void __attribute__((unused))
222
index XXXXXXX..XXXXXXX 100644
1126
+tcg_out_opc_vfmadd_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
223
--- a/target/cris/translate.c
1127
+{
224
+++ b/target/cris/translate.c
1128
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMADD_S, vd, vj, vk, va));
225
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps cris_tr_ops = {
1129
+}
226
.disas_log = cris_tr_disas_log,
1130
+
227
};
1131
+/* Emits the `vfmadd.d vd, vj, vk, va` instruction. */
228
1132
+static void __attribute__((unused))
229
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1133
+tcg_out_opc_vfmadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
230
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
1134
+{
231
+ target_ulong pc, void *host_pc)
1135
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMADD_D, vd, vj, vk, va));
232
{
1136
+}
233
DisasContext dc;
1137
+
234
- translator_loop(&cris_tr_ops, &dc.base, cs, tb, max_insns);
1138
+/* Emits the `vfmsub.s vd, vj, vk, va` instruction. */
235
+ translator_loop(cs, tb, max_insns, pc, host_pc, &cris_tr_ops, &dc.base);
1139
+static void __attribute__((unused))
1140
+tcg_out_opc_vfmsub_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1141
+{
1142
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMSUB_S, vd, vj, vk, va));
1143
+}
1144
+
1145
+/* Emits the `vfmsub.d vd, vj, vk, va` instruction. */
1146
+static void __attribute__((unused))
1147
+tcg_out_opc_vfmsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1148
+{
1149
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMSUB_D, vd, vj, vk, va));
1150
+}
1151
+
1152
+/* Emits the `vfnmadd.s vd, vj, vk, va` instruction. */
1153
+static void __attribute__((unused))
1154
+tcg_out_opc_vfnmadd_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1155
+{
1156
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMADD_S, vd, vj, vk, va));
1157
+}
1158
+
1159
+/* Emits the `vfnmadd.d vd, vj, vk, va` instruction. */
1160
+static void __attribute__((unused))
1161
+tcg_out_opc_vfnmadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1162
+{
1163
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMADD_D, vd, vj, vk, va));
1164
+}
1165
+
1166
+/* Emits the `vfnmsub.s vd, vj, vk, va` instruction. */
1167
+static void __attribute__((unused))
1168
+tcg_out_opc_vfnmsub_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1169
+{
1170
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMSUB_S, vd, vj, vk, va));
1171
+}
1172
+
1173
+/* Emits the `vfnmsub.d vd, vj, vk, va` instruction. */
1174
+static void __attribute__((unused))
1175
+tcg_out_opc_vfnmsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1176
+{
1177
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMSUB_D, vd, vj, vk, va));
1178
+}
1179
+
1180
+/* Emits the `vfcmp.caf.s vd, vj, vk` instruction. */
1181
+static void __attribute__((unused))
1182
+tcg_out_opc_vfcmp_caf_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1183
+{
1184
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CAF_S, vd, vj, vk));
1185
+}
1186
+
1187
+/* Emits the `vfcmp.saf.s vd, vj, vk` instruction. */
1188
+static void __attribute__((unused))
1189
+tcg_out_opc_vfcmp_saf_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1190
+{
1191
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SAF_S, vd, vj, vk));
1192
+}
1193
+
1194
+/* Emits the `vfcmp.clt.s vd, vj, vk` instruction. */
1195
+static void __attribute__((unused))
1196
+tcg_out_opc_vfcmp_clt_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1197
+{
1198
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLT_S, vd, vj, vk));
1199
+}
1200
+
1201
+/* Emits the `vfcmp.slt.s vd, vj, vk` instruction. */
1202
+static void __attribute__((unused))
1203
+tcg_out_opc_vfcmp_slt_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1204
+{
1205
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLT_S, vd, vj, vk));
1206
+}
1207
+
1208
+/* Emits the `vfcmp.ceq.s vd, vj, vk` instruction. */
1209
+static void __attribute__((unused))
1210
+tcg_out_opc_vfcmp_ceq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1211
+{
1212
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CEQ_S, vd, vj, vk));
1213
+}
1214
+
1215
+/* Emits the `vfcmp.seq.s vd, vj, vk` instruction. */
1216
+static void __attribute__((unused))
1217
+tcg_out_opc_vfcmp_seq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1218
+{
1219
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SEQ_S, vd, vj, vk));
1220
+}
1221
+
1222
+/* Emits the `vfcmp.cle.s vd, vj, vk` instruction. */
1223
+static void __attribute__((unused))
1224
+tcg_out_opc_vfcmp_cle_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1225
+{
1226
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLE_S, vd, vj, vk));
1227
+}
1228
+
1229
+/* Emits the `vfcmp.sle.s vd, vj, vk` instruction. */
1230
+static void __attribute__((unused))
1231
+tcg_out_opc_vfcmp_sle_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1232
+{
1233
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLE_S, vd, vj, vk));
1234
+}
1235
+
1236
+/* Emits the `vfcmp.cun.s vd, vj, vk` instruction. */
1237
+static void __attribute__((unused))
1238
+tcg_out_opc_vfcmp_cun_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1239
+{
1240
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUN_S, vd, vj, vk));
1241
+}
1242
+
1243
+/* Emits the `vfcmp.sun.s vd, vj, vk` instruction. */
1244
+static void __attribute__((unused))
1245
+tcg_out_opc_vfcmp_sun_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1246
+{
1247
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUN_S, vd, vj, vk));
1248
+}
1249
+
1250
+/* Emits the `vfcmp.cult.s vd, vj, vk` instruction. */
1251
+static void __attribute__((unused))
1252
+tcg_out_opc_vfcmp_cult_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1253
+{
1254
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULT_S, vd, vj, vk));
1255
+}
1256
+
1257
+/* Emits the `vfcmp.sult.s vd, vj, vk` instruction. */
1258
+static void __attribute__((unused))
1259
+tcg_out_opc_vfcmp_sult_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1260
+{
1261
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULT_S, vd, vj, vk));
1262
+}
1263
+
1264
+/* Emits the `vfcmp.cueq.s vd, vj, vk` instruction. */
1265
+static void __attribute__((unused))
1266
+tcg_out_opc_vfcmp_cueq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1267
+{
1268
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUEQ_S, vd, vj, vk));
1269
+}
1270
+
1271
+/* Emits the `vfcmp.sueq.s vd, vj, vk` instruction. */
1272
+static void __attribute__((unused))
1273
+tcg_out_opc_vfcmp_sueq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1274
+{
1275
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUEQ_S, vd, vj, vk));
1276
+}
1277
+
1278
+/* Emits the `vfcmp.cule.s vd, vj, vk` instruction. */
1279
+static void __attribute__((unused))
1280
+tcg_out_opc_vfcmp_cule_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1281
+{
1282
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULE_S, vd, vj, vk));
1283
+}
1284
+
1285
+/* Emits the `vfcmp.sule.s vd, vj, vk` instruction. */
1286
+static void __attribute__((unused))
1287
+tcg_out_opc_vfcmp_sule_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1288
+{
1289
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULE_S, vd, vj, vk));
1290
+}
1291
+
1292
+/* Emits the `vfcmp.cne.s vd, vj, vk` instruction. */
1293
+static void __attribute__((unused))
1294
+tcg_out_opc_vfcmp_cne_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1295
+{
1296
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CNE_S, vd, vj, vk));
1297
+}
1298
+
1299
+/* Emits the `vfcmp.sne.s vd, vj, vk` instruction. */
1300
+static void __attribute__((unused))
1301
+tcg_out_opc_vfcmp_sne_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1302
+{
1303
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SNE_S, vd, vj, vk));
1304
+}
1305
+
1306
+/* Emits the `vfcmp.cor.s vd, vj, vk` instruction. */
1307
+static void __attribute__((unused))
1308
+tcg_out_opc_vfcmp_cor_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1309
+{
1310
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_COR_S, vd, vj, vk));
1311
+}
1312
+
1313
+/* Emits the `vfcmp.sor.s vd, vj, vk` instruction. */
1314
+static void __attribute__((unused))
1315
+tcg_out_opc_vfcmp_sor_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1316
+{
1317
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SOR_S, vd, vj, vk));
1318
+}
1319
+
1320
+/* Emits the `vfcmp.cune.s vd, vj, vk` instruction. */
1321
+static void __attribute__((unused))
1322
+tcg_out_opc_vfcmp_cune_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1323
+{
1324
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUNE_S, vd, vj, vk));
1325
+}
1326
+
1327
+/* Emits the `vfcmp.sune.s vd, vj, vk` instruction. */
1328
+static void __attribute__((unused))
1329
+tcg_out_opc_vfcmp_sune_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1330
+{
1331
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUNE_S, vd, vj, vk));
1332
+}
1333
+
1334
+/* Emits the `vfcmp.caf.d vd, vj, vk` instruction. */
1335
+static void __attribute__((unused))
1336
+tcg_out_opc_vfcmp_caf_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1337
+{
1338
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CAF_D, vd, vj, vk));
1339
+}
1340
+
1341
+/* Emits the `vfcmp.saf.d vd, vj, vk` instruction. */
1342
+static void __attribute__((unused))
1343
+tcg_out_opc_vfcmp_saf_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1344
+{
1345
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SAF_D, vd, vj, vk));
1346
+}
1347
+
1348
+/* Emits the `vfcmp.clt.d vd, vj, vk` instruction. */
1349
+static void __attribute__((unused))
1350
+tcg_out_opc_vfcmp_clt_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1351
+{
1352
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLT_D, vd, vj, vk));
1353
+}
1354
+
1355
+/* Emits the `vfcmp.slt.d vd, vj, vk` instruction. */
1356
+static void __attribute__((unused))
1357
+tcg_out_opc_vfcmp_slt_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1358
+{
1359
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLT_D, vd, vj, vk));
1360
+}
1361
+
1362
+/* Emits the `vfcmp.ceq.d vd, vj, vk` instruction. */
1363
+static void __attribute__((unused))
1364
+tcg_out_opc_vfcmp_ceq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1365
+{
1366
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CEQ_D, vd, vj, vk));
1367
+}
1368
+
1369
+/* Emits the `vfcmp.seq.d vd, vj, vk` instruction. */
1370
+static void __attribute__((unused))
1371
+tcg_out_opc_vfcmp_seq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1372
+{
1373
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SEQ_D, vd, vj, vk));
1374
+}
1375
+
1376
+/* Emits the `vfcmp.cle.d vd, vj, vk` instruction. */
1377
+static void __attribute__((unused))
1378
+tcg_out_opc_vfcmp_cle_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1379
+{
1380
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLE_D, vd, vj, vk));
1381
+}
1382
+
1383
+/* Emits the `vfcmp.sle.d vd, vj, vk` instruction. */
1384
+static void __attribute__((unused))
1385
+tcg_out_opc_vfcmp_sle_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1386
+{
1387
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLE_D, vd, vj, vk));
1388
+}
1389
+
1390
+/* Emits the `vfcmp.cun.d vd, vj, vk` instruction. */
1391
+static void __attribute__((unused))
1392
+tcg_out_opc_vfcmp_cun_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1393
+{
1394
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUN_D, vd, vj, vk));
1395
+}
1396
+
1397
+/* Emits the `vfcmp.sun.d vd, vj, vk` instruction. */
1398
+static void __attribute__((unused))
1399
+tcg_out_opc_vfcmp_sun_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1400
+{
1401
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUN_D, vd, vj, vk));
1402
+}
1403
+
1404
+/* Emits the `vfcmp.cult.d vd, vj, vk` instruction. */
1405
+static void __attribute__((unused))
1406
+tcg_out_opc_vfcmp_cult_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1407
+{
1408
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULT_D, vd, vj, vk));
1409
+}
1410
+
1411
+/* Emits the `vfcmp.sult.d vd, vj, vk` instruction. */
1412
+static void __attribute__((unused))
1413
+tcg_out_opc_vfcmp_sult_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1414
+{
1415
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULT_D, vd, vj, vk));
1416
+}
1417
+
1418
+/* Emits the `vfcmp.cueq.d vd, vj, vk` instruction. */
1419
+static void __attribute__((unused))
1420
+tcg_out_opc_vfcmp_cueq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1421
+{
1422
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUEQ_D, vd, vj, vk));
1423
+}
1424
+
1425
+/* Emits the `vfcmp.sueq.d vd, vj, vk` instruction. */
1426
+static void __attribute__((unused))
1427
+tcg_out_opc_vfcmp_sueq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1428
+{
1429
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUEQ_D, vd, vj, vk));
1430
+}
1431
+
1432
+/* Emits the `vfcmp.cule.d vd, vj, vk` instruction. */
1433
+static void __attribute__((unused))
1434
+tcg_out_opc_vfcmp_cule_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1435
+{
1436
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULE_D, vd, vj, vk));
1437
+}
1438
+
1439
+/* Emits the `vfcmp.sule.d vd, vj, vk` instruction. */
1440
+static void __attribute__((unused))
1441
+tcg_out_opc_vfcmp_sule_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1442
+{
1443
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULE_D, vd, vj, vk));
1444
+}
1445
+
1446
+/* Emits the `vfcmp.cne.d vd, vj, vk` instruction. */
1447
+static void __attribute__((unused))
1448
+tcg_out_opc_vfcmp_cne_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1449
+{
1450
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CNE_D, vd, vj, vk));
1451
+}
1452
+
1453
+/* Emits the `vfcmp.sne.d vd, vj, vk` instruction. */
1454
+static void __attribute__((unused))
1455
+tcg_out_opc_vfcmp_sne_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1456
+{
1457
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SNE_D, vd, vj, vk));
1458
+}
1459
+
1460
+/* Emits the `vfcmp.cor.d vd, vj, vk` instruction. */
1461
+static void __attribute__((unused))
1462
+tcg_out_opc_vfcmp_cor_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1463
+{
1464
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_COR_D, vd, vj, vk));
1465
+}
1466
+
1467
+/* Emits the `vfcmp.sor.d vd, vj, vk` instruction. */
1468
+static void __attribute__((unused))
1469
+tcg_out_opc_vfcmp_sor_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1470
+{
1471
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SOR_D, vd, vj, vk));
1472
+}
1473
+
1474
+/* Emits the `vfcmp.cune.d vd, vj, vk` instruction. */
1475
+static void __attribute__((unused))
1476
+tcg_out_opc_vfcmp_cune_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1477
+{
1478
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUNE_D, vd, vj, vk));
1479
+}
1480
+
1481
+/* Emits the `vfcmp.sune.d vd, vj, vk` instruction. */
1482
+static void __attribute__((unused))
1483
+tcg_out_opc_vfcmp_sune_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1484
+{
1485
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUNE_D, vd, vj, vk));
1486
+}
1487
+
1488
+/* Emits the `vbitsel.v vd, vj, vk, va` instruction. */
1489
+static void __attribute__((unused))
1490
+tcg_out_opc_vbitsel_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1491
+{
1492
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VBITSEL_V, vd, vj, vk, va));
1493
+}
1494
+
1495
+/* Emits the `vshuf.b vd, vj, vk, va` instruction. */
1496
+static void __attribute__((unused))
1497
+tcg_out_opc_vshuf_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
1498
+{
1499
+ tcg_out32(s, encode_vdvjvkva_insn(OPC_VSHUF_B, vd, vj, vk, va));
1500
+}
1501
+
1502
/* Emits the `addu16i.d d, j, sk16` instruction. */
1503
static void __attribute__((unused))
1504
tcg_out_opc_addu16i_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
1505
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_ld_wu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
1506
tcg_out32(s, encode_djsk12_insn(OPC_LD_WU, d, j, sk12));
236
}
1507
}
237
1508
238
void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags)
1509
+/* Emits the `vld vd, j, sk12` instruction. */
239
diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c
1510
+static void __attribute__((unused))
240
index XXXXXXX..XXXXXXX 100644
1511
+tcg_out_opc_vld(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk12)
241
--- a/target/hexagon/translate.c
1512
+{
242
+++ b/target/hexagon/translate.c
1513
+ tcg_out32(s, encode_vdjsk12_insn(OPC_VLD, vd, j, sk12));
243
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hexagon_tr_ops = {
1514
+}
244
.disas_log = hexagon_tr_disas_log,
1515
+
245
};
1516
+/* Emits the `vst vd, j, sk12` instruction. */
246
1517
+static void __attribute__((unused))
247
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1518
+tcg_out_opc_vst(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk12)
248
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
1519
+{
249
+ target_ulong pc, void *host_pc)
1520
+ tcg_out32(s, encode_vdjsk12_insn(OPC_VST, vd, j, sk12));
250
{
1521
+}
251
DisasContext ctx;
1522
+
252
1523
+/* Emits the `vldrepl.d vd, j, sk9` instruction. */
253
- translator_loop(&hexagon_tr_ops, &ctx.base, cs, tb, max_insns);
1524
+static void __attribute__((unused))
254
+ translator_loop(cs, tb, max_insns, pc, host_pc,
1525
+tcg_out_opc_vldrepl_d(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk9)
255
+ &hexagon_tr_ops, &ctx.base);
1526
+{
1527
+ tcg_out32(s, encode_vdjsk9_insn(OPC_VLDREPL_D, vd, j, sk9));
1528
+}
1529
+
1530
+/* Emits the `vldrepl.w vd, j, sk10` instruction. */
1531
+static void __attribute__((unused))
1532
+tcg_out_opc_vldrepl_w(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk10)
1533
+{
1534
+ tcg_out32(s, encode_vdjsk10_insn(OPC_VLDREPL_W, vd, j, sk10));
1535
+}
1536
+
1537
+/* Emits the `vldrepl.h vd, j, sk11` instruction. */
1538
+static void __attribute__((unused))
1539
+tcg_out_opc_vldrepl_h(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk11)
1540
+{
1541
+ tcg_out32(s, encode_vdjsk11_insn(OPC_VLDREPL_H, vd, j, sk11));
1542
+}
1543
+
1544
+/* Emits the `vldrepl.b vd, j, sk12` instruction. */
1545
+static void __attribute__((unused))
1546
+tcg_out_opc_vldrepl_b(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk12)
1547
+{
1548
+ tcg_out32(s, encode_vdjsk12_insn(OPC_VLDREPL_B, vd, j, sk12));
1549
+}
1550
+
1551
+/* Emits the `vstelm.d vd, j, sk8, un1` instruction. */
1552
+static void __attribute__((unused))
1553
+tcg_out_opc_vstelm_d(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
1554
+ uint32_t un1)
1555
+{
1556
+ tcg_out32(s, encode_vdjsk8un1_insn(OPC_VSTELM_D, vd, j, sk8, un1));
1557
+}
1558
+
1559
+/* Emits the `vstelm.w vd, j, sk8, un2` instruction. */
1560
+static void __attribute__((unused))
1561
+tcg_out_opc_vstelm_w(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
1562
+ uint32_t un2)
1563
+{
1564
+ tcg_out32(s, encode_vdjsk8un2_insn(OPC_VSTELM_W, vd, j, sk8, un2));
1565
+}
1566
+
1567
+/* Emits the `vstelm.h vd, j, sk8, un3` instruction. */
1568
+static void __attribute__((unused))
1569
+tcg_out_opc_vstelm_h(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
1570
+ uint32_t un3)
1571
+{
1572
+ tcg_out32(s, encode_vdjsk8un3_insn(OPC_VSTELM_H, vd, j, sk8, un3));
1573
+}
1574
+
1575
+/* Emits the `vstelm.b vd, j, sk8, un4` instruction. */
1576
+static void __attribute__((unused))
1577
+tcg_out_opc_vstelm_b(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
1578
+ uint32_t un4)
1579
+{
1580
+ tcg_out32(s, encode_vdjsk8un4_insn(OPC_VSTELM_B, vd, j, sk8, un4));
1581
+}
1582
+
1583
/* Emits the `ldx.b d, j, k` instruction. */
1584
static void __attribute__((unused))
1585
tcg_out_opc_ldx_b(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
1586
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_ldx_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
1587
tcg_out32(s, encode_djk_insn(OPC_LDX_WU, d, j, k));
256
}
1588
}
257
1589
258
#define NAME_LEN 64
1590
+/* Emits the `vldx vd, j, k` instruction. */
259
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
1591
+static void __attribute__((unused))
260
index XXXXXXX..XXXXXXX 100644
1592
+tcg_out_opc_vldx(TCGContext *s, TCGReg vd, TCGReg j, TCGReg k)
261
--- a/target/hppa/translate.c
1593
+{
262
+++ b/target/hppa/translate.c
1594
+ tcg_out32(s, encode_vdjk_insn(OPC_VLDX, vd, j, k));
263
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps hppa_tr_ops = {
1595
+}
264
.disas_log = hppa_tr_disas_log,
1596
+
265
};
1597
+/* Emits the `vstx vd, j, k` instruction. */
266
1598
+static void __attribute__((unused))
267
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1599
+tcg_out_opc_vstx(TCGContext *s, TCGReg vd, TCGReg j, TCGReg k)
268
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
1600
+{
269
+ target_ulong pc, void *host_pc)
1601
+ tcg_out32(s, encode_vdjk_insn(OPC_VSTX, vd, j, k));
270
{
1602
+}
271
DisasContext ctx;
1603
+
272
- translator_loop(&hppa_tr_ops, &ctx.base, cs, tb, max_insns);
1604
/* Emits the `dbar ud15` instruction. */
273
+ translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
1605
static void __attribute__((unused))
1606
tcg_out_opc_dbar(TCGContext *s, uint32_t ud15)
1607
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_bleu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
1608
tcg_out32(s, encode_djsk16_insn(OPC_BLEU, d, j, sk16));
274
}
1609
}
275
1610
276
void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
1611
+/* Emits the `vseq.b vd, vj, vk` instruction. */
277
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
1612
+static void __attribute__((unused))
278
index XXXXXXX..XXXXXXX 100644
1613
+tcg_out_opc_vseq_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
279
--- a/target/i386/tcg/translate.c
1614
+{
280
+++ b/target/i386/tcg/translate.c
1615
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_B, vd, vj, vk));
281
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps i386_tr_ops = {
1616
+}
282
};
1617
+
283
1618
+/* Emits the `vseq.h vd, vj, vk` instruction. */
284
/* generate intermediate code for basic block 'tb'. */
1619
+static void __attribute__((unused))
285
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
1620
+tcg_out_opc_vseq_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
286
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
1621
+{
287
+ target_ulong pc, void *host_pc)
1622
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_H, vd, vj, vk));
288
{
1623
+}
289
DisasContext dc;
1624
+
290
1625
+/* Emits the `vseq.w vd, vj, vk` instruction. */
291
- translator_loop(&i386_tr_ops, &dc.base, cpu, tb, max_insns);
1626
+static void __attribute__((unused))
292
+ translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
1627
+tcg_out_opc_vseq_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
293
}
1628
+{
294
1629
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_W, vd, vj, vk));
295
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
1630
+}
296
diff --git a/target/loongarch/translate.c b/target/loongarch/translate.c
1631
+
297
index XXXXXXX..XXXXXXX 100644
1632
+/* Emits the `vseq.d vd, vj, vk` instruction. */
298
--- a/target/loongarch/translate.c
1633
+static void __attribute__((unused))
299
+++ b/target/loongarch/translate.c
1634
+tcg_out_opc_vseq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
300
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps loongarch_tr_ops = {
1635
+{
301
.disas_log = loongarch_tr_disas_log,
1636
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_D, vd, vj, vk));
302
};
1637
+}
303
1638
+
304
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1639
+/* Emits the `vsle.b vd, vj, vk` instruction. */
305
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
1640
+static void __attribute__((unused))
306
+ target_ulong pc, void *host_pc)
1641
+tcg_out_opc_vsle_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
307
{
1642
+{
308
DisasContext ctx;
1643
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_B, vd, vj, vk));
309
1644
+}
310
- translator_loop(&loongarch_tr_ops, &ctx.base, cs, tb, max_insns);
1645
+
311
+ translator_loop(cs, tb, max_insns, pc, host_pc,
1646
+/* Emits the `vsle.h vd, vj, vk` instruction. */
312
+ &loongarch_tr_ops, &ctx.base);
1647
+static void __attribute__((unused))
313
}
1648
+tcg_out_opc_vsle_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
314
1649
+{
315
void loongarch_translate_init(void)
1650
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_H, vd, vj, vk));
316
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
1651
+}
317
index XXXXXXX..XXXXXXX 100644
1652
+
318
--- a/target/m68k/translate.c
1653
+/* Emits the `vsle.w vd, vj, vk` instruction. */
319
+++ b/target/m68k/translate.c
1654
+static void __attribute__((unused))
320
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps m68k_tr_ops = {
1655
+tcg_out_opc_vsle_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
321
.disas_log = m68k_tr_disas_log,
1656
+{
322
};
1657
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_W, vd, vj, vk));
323
1658
+}
324
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
1659
+
325
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
1660
+/* Emits the `vsle.d vd, vj, vk` instruction. */
326
+ target_ulong pc, void *host_pc)
1661
+static void __attribute__((unused))
327
{
1662
+tcg_out_opc_vsle_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
328
DisasContext dc;
1663
+{
329
- translator_loop(&m68k_tr_ops, &dc.base, cpu, tb, max_insns);
1664
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_D, vd, vj, vk));
330
+ translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
1665
+}
331
}
1666
+
332
1667
+/* Emits the `vsle.bu vd, vj, vk` instruction. */
333
static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
1668
+static void __attribute__((unused))
334
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
1669
+tcg_out_opc_vsle_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
335
index XXXXXXX..XXXXXXX 100644
1670
+{
336
--- a/target/microblaze/translate.c
1671
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_BU, vd, vj, vk));
337
+++ b/target/microblaze/translate.c
1672
+}
338
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mb_tr_ops = {
1673
+
339
.disas_log = mb_tr_disas_log,
1674
+/* Emits the `vsle.hu vd, vj, vk` instruction. */
340
};
1675
+static void __attribute__((unused))
341
1676
+tcg_out_opc_vsle_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
342
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
1677
+{
343
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
1678
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_HU, vd, vj, vk));
344
+ target_ulong pc, void *host_pc)
1679
+}
345
{
1680
+
346
DisasContext dc;
1681
+/* Emits the `vsle.wu vd, vj, vk` instruction. */
347
- translator_loop(&mb_tr_ops, &dc.base, cpu, tb, max_insns);
1682
+static void __attribute__((unused))
348
+ translator_loop(cpu, tb, max_insns, pc, host_pc, &mb_tr_ops, &dc.base);
1683
+tcg_out_opc_vsle_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
349
}
1684
+{
350
1685
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_WU, vd, vj, vk));
351
void mb_cpu_dump_state(CPUState *cs, FILE *f, int flags)
1686
+}
352
diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c
1687
+
353
index XXXXXXX..XXXXXXX 100644
1688
+/* Emits the `vsle.du vd, vj, vk` instruction. */
354
--- a/target/mips/tcg/translate.c
1689
+static void __attribute__((unused))
355
+++ b/target/mips/tcg/translate.c
1690
+tcg_out_opc_vsle_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
356
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps mips_tr_ops = {
1691
+{
357
.disas_log = mips_tr_disas_log,
1692
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_DU, vd, vj, vk));
358
};
1693
+}
359
1694
+
360
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1695
+/* Emits the `vslt.b vd, vj, vk` instruction. */
361
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
1696
+static void __attribute__((unused))
362
+ target_ulong pc, void *host_pc)
1697
+tcg_out_opc_vslt_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
363
{
1698
+{
364
DisasContext ctx;
1699
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_B, vd, vj, vk));
365
1700
+}
366
- translator_loop(&mips_tr_ops, &ctx.base, cs, tb, max_insns);
1701
+
367
+ translator_loop(cs, tb, max_insns, pc, host_pc, &mips_tr_ops, &ctx.base);
1702
+/* Emits the `vslt.h vd, vj, vk` instruction. */
368
}
1703
+static void __attribute__((unused))
369
1704
+tcg_out_opc_vslt_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
370
void mips_tcg_init(void)
1705
+{
371
diff --git a/target/nios2/translate.c b/target/nios2/translate.c
1706
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_H, vd, vj, vk));
372
index XXXXXXX..XXXXXXX 100644
1707
+}
373
--- a/target/nios2/translate.c
1708
+
374
+++ b/target/nios2/translate.c
1709
+/* Emits the `vslt.w vd, vj, vk` instruction. */
375
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps nios2_tr_ops = {
1710
+static void __attribute__((unused))
376
.disas_log = nios2_tr_disas_log,
1711
+tcg_out_opc_vslt_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
377
};
1712
+{
378
1713
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_W, vd, vj, vk));
379
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1714
+}
380
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
1715
+
381
+ target_ulong pc, void *host_pc)
1716
+/* Emits the `vslt.d vd, vj, vk` instruction. */
382
{
1717
+static void __attribute__((unused))
383
DisasContext dc;
1718
+tcg_out_opc_vslt_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
384
- translator_loop(&nios2_tr_ops, &dc.base, cs, tb, max_insns);
1719
+{
385
+ translator_loop(cs, tb, max_insns, pc, host_pc, &nios2_tr_ops, &dc.base);
1720
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_D, vd, vj, vk));
386
}
1721
+}
387
1722
+
388
void nios2_cpu_dump_state(CPUState *cs, FILE *f, int flags)
1723
+/* Emits the `vslt.bu vd, vj, vk` instruction. */
389
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
1724
+static void __attribute__((unused))
390
index XXXXXXX..XXXXXXX 100644
1725
+tcg_out_opc_vslt_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
391
--- a/target/openrisc/translate.c
1726
+{
392
+++ b/target/openrisc/translate.c
1727
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_BU, vd, vj, vk));
393
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps openrisc_tr_ops = {
1728
+}
394
.disas_log = openrisc_tr_disas_log,
1729
+
395
};
1730
+/* Emits the `vslt.hu vd, vj, vk` instruction. */
396
1731
+static void __attribute__((unused))
397
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1732
+tcg_out_opc_vslt_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
398
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
1733
+{
399
+ target_ulong pc, void *host_pc)
1734
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_HU, vd, vj, vk));
400
{
1735
+}
401
DisasContext ctx;
1736
+
402
1737
+/* Emits the `vslt.wu vd, vj, vk` instruction. */
403
- translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb, max_insns);
1738
+static void __attribute__((unused))
404
+ translator_loop(cs, tb, max_insns, pc, host_pc,
1739
+tcg_out_opc_vslt_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
405
+ &openrisc_tr_ops, &ctx.base);
1740
+{
406
}
1741
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_WU, vd, vj, vk));
407
1742
+}
408
void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
1743
+
409
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
1744
+/* Emits the `vslt.du vd, vj, vk` instruction. */
410
index XXXXXXX..XXXXXXX 100644
1745
+static void __attribute__((unused))
411
--- a/target/ppc/translate.c
1746
+tcg_out_opc_vslt_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
412
+++ b/target/ppc/translate.c
1747
+{
413
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps ppc_tr_ops = {
1748
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_DU, vd, vj, vk));
414
.disas_log = ppc_tr_disas_log,
1749
+}
415
};
1750
+
416
1751
+/* Emits the `vadd.b vd, vj, vk` instruction. */
417
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1752
+static void __attribute__((unused))
418
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
1753
+tcg_out_opc_vadd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
419
+ target_ulong pc, void *host_pc)
1754
+{
420
{
1755
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_B, vd, vj, vk));
421
DisasContext ctx;
1756
+}
422
1757
+
423
- translator_loop(&ppc_tr_ops, &ctx.base, cs, tb, max_insns);
1758
+/* Emits the `vadd.h vd, vj, vk` instruction. */
424
+ translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base);
1759
+static void __attribute__((unused))
425
}
1760
+tcg_out_opc_vadd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
426
1761
+{
427
void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb,
1762
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_H, vd, vj, vk));
428
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
1763
+}
429
index XXXXXXX..XXXXXXX 100644
1764
+
430
--- a/target/riscv/translate.c
1765
+/* Emits the `vadd.w vd, vj, vk` instruction. */
431
+++ b/target/riscv/translate.c
1766
+static void __attribute__((unused))
432
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps riscv_tr_ops = {
1767
+tcg_out_opc_vadd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
433
.disas_log = riscv_tr_disas_log,
1768
+{
434
};
1769
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_W, vd, vj, vk));
435
1770
+}
436
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1771
+
437
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
1772
+/* Emits the `vadd.d vd, vj, vk` instruction. */
438
+ target_ulong pc, void *host_pc)
1773
+static void __attribute__((unused))
439
{
1774
+tcg_out_opc_vadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
440
DisasContext ctx;
1775
+{
441
1776
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_D, vd, vj, vk));
442
- translator_loop(&riscv_tr_ops, &ctx.base, cs, tb, max_insns);
1777
+}
443
+ translator_loop(cs, tb, max_insns, pc, host_pc, &riscv_tr_ops, &ctx.base);
1778
+
444
}
1779
+/* Emits the `vsub.b vd, vj, vk` instruction. */
445
1780
+static void __attribute__((unused))
446
void riscv_translate_init(void)
1781
+tcg_out_opc_vsub_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
447
diff --git a/target/rx/translate.c b/target/rx/translate.c
1782
+{
448
index XXXXXXX..XXXXXXX 100644
1783
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_B, vd, vj, vk));
449
--- a/target/rx/translate.c
1784
+}
450
+++ b/target/rx/translate.c
1785
+
451
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps rx_tr_ops = {
1786
+/* Emits the `vsub.h vd, vj, vk` instruction. */
452
.disas_log = rx_tr_disas_log,
1787
+static void __attribute__((unused))
453
};
1788
+tcg_out_opc_vsub_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
454
1789
+{
455
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1790
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_H, vd, vj, vk));
456
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
1791
+}
457
+ target_ulong pc, void *host_pc)
1792
+
458
{
1793
+/* Emits the `vsub.w vd, vj, vk` instruction. */
459
DisasContext dc;
1794
+static void __attribute__((unused))
460
1795
+tcg_out_opc_vsub_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
461
- translator_loop(&rx_tr_ops, &dc.base, cs, tb, max_insns);
1796
+{
462
+ translator_loop(cs, tb, max_insns, pc, host_pc, &rx_tr_ops, &dc.base);
1797
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_W, vd, vj, vk));
463
}
1798
+}
464
1799
+
465
void restore_state_to_opc(CPURXState *env, TranslationBlock *tb,
1800
+/* Emits the `vsub.d vd, vj, vk` instruction. */
466
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
1801
+static void __attribute__((unused))
467
index XXXXXXX..XXXXXXX 100644
1802
+tcg_out_opc_vsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
468
--- a/target/s390x/tcg/translate.c
1803
+{
469
+++ b/target/s390x/tcg/translate.c
1804
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_D, vd, vj, vk));
470
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps s390x_tr_ops = {
1805
+}
471
.disas_log = s390x_tr_disas_log,
1806
+
472
};
1807
+/* Emits the `vaddwev.h.b vd, vj, vk` instruction. */
473
1808
+static void __attribute__((unused))
474
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1809
+tcg_out_opc_vaddwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
475
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
1810
+{
476
+ target_ulong pc, void *host_pc)
1811
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_H_B, vd, vj, vk));
477
{
1812
+}
478
DisasContext dc;
1813
+
479
1814
+/* Emits the `vaddwev.w.h vd, vj, vk` instruction. */
480
- translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
1815
+static void __attribute__((unused))
481
+ translator_loop(cs, tb, max_insns, pc, host_pc, &s390x_tr_ops, &dc.base);
1816
+tcg_out_opc_vaddwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
482
}
1817
+{
483
1818
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_W_H, vd, vj, vk));
484
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
1819
+}
485
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
1820
+
486
index XXXXXXX..XXXXXXX 100644
1821
+/* Emits the `vaddwev.d.w vd, vj, vk` instruction. */
487
--- a/target/sh4/translate.c
1822
+static void __attribute__((unused))
488
+++ b/target/sh4/translate.c
1823
+tcg_out_opc_vaddwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
489
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sh4_tr_ops = {
1824
+{
490
.disas_log = sh4_tr_disas_log,
1825
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_D_W, vd, vj, vk));
491
};
1826
+}
492
1827
+
493
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1828
+/* Emits the `vaddwev.q.d vd, vj, vk` instruction. */
494
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
1829
+static void __attribute__((unused))
495
+ target_ulong pc, void *host_pc)
1830
+tcg_out_opc_vaddwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
496
{
1831
+{
497
DisasContext ctx;
1832
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_Q_D, vd, vj, vk));
498
1833
+}
499
- translator_loop(&sh4_tr_ops, &ctx.base, cs, tb, max_insns);
1834
+
500
+ translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base);
1835
+/* Emits the `vsubwev.h.b vd, vj, vk` instruction. */
501
}
1836
+static void __attribute__((unused))
502
1837
+tcg_out_opc_vsubwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
503
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
1838
+{
504
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
1839
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_H_B, vd, vj, vk));
505
index XXXXXXX..XXXXXXX 100644
1840
+}
506
--- a/target/sparc/translate.c
1841
+
507
+++ b/target/sparc/translate.c
1842
+/* Emits the `vsubwev.w.h vd, vj, vk` instruction. */
508
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps sparc_tr_ops = {
1843
+static void __attribute__((unused))
509
.disas_log = sparc_tr_disas_log,
1844
+tcg_out_opc_vsubwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
510
};
1845
+{
511
1846
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_W_H, vd, vj, vk));
512
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1847
+}
513
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
1848
+
514
+ target_ulong pc, void *host_pc)
1849
+/* Emits the `vsubwev.d.w vd, vj, vk` instruction. */
515
{
1850
+static void __attribute__((unused))
516
DisasContext dc = {};
1851
+tcg_out_opc_vsubwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
517
1852
+{
518
- translator_loop(&sparc_tr_ops, &dc.base, cs, tb, max_insns);
1853
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_D_W, vd, vj, vk));
519
+ translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
1854
+}
520
}
1855
+
521
1856
+/* Emits the `vsubwev.q.d vd, vj, vk` instruction. */
522
void sparc_tcg_init(void)
1857
+static void __attribute__((unused))
523
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
1858
+tcg_out_opc_vsubwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
524
index XXXXXXX..XXXXXXX 100644
1859
+{
525
--- a/target/tricore/translate.c
1860
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_Q_D, vd, vj, vk));
526
+++ b/target/tricore/translate.c
1861
+}
527
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps tricore_tr_ops = {
1862
+
528
};
1863
+/* Emits the `vaddwod.h.b vd, vj, vk` instruction. */
529
1864
+static void __attribute__((unused))
530
1865
+tcg_out_opc_vaddwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
531
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
1866
+{
532
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
1867
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_H_B, vd, vj, vk));
533
+ target_ulong pc, void *host_pc)
1868
+}
534
{
1869
+
535
DisasContext ctx;
1870
+/* Emits the `vaddwod.w.h vd, vj, vk` instruction. */
536
- translator_loop(&tricore_tr_ops, &ctx.base, cs, tb, max_insns);
1871
+static void __attribute__((unused))
537
+ translator_loop(cs, tb, max_insns, pc, host_pc,
1872
+tcg_out_opc_vaddwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
538
+ &tricore_tr_ops, &ctx.base);
1873
+{
539
}
1874
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_W_H, vd, vj, vk));
540
1875
+}
541
void
1876
+
542
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
1877
+/* Emits the `vaddwod.d.w vd, vj, vk` instruction. */
543
index XXXXXXX..XXXXXXX 100644
1878
+static void __attribute__((unused))
544
--- a/target/xtensa/translate.c
1879
+tcg_out_opc_vaddwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
545
+++ b/target/xtensa/translate.c
1880
+{
546
@@ -XXX,XX +XXX,XX @@ static const TranslatorOps xtensa_translator_ops = {
1881
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_D_W, vd, vj, vk));
547
.disas_log = xtensa_tr_disas_log,
1882
+}
548
};
1883
+
549
1884
+/* Emits the `vaddwod.q.d vd, vj, vk` instruction. */
550
-void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
1885
+static void __attribute__((unused))
551
+void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns,
1886
+tcg_out_opc_vaddwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
552
+ target_ulong pc, void *host_pc)
1887
+{
553
{
1888
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_Q_D, vd, vj, vk));
554
DisasContext dc = {};
1889
+}
555
- translator_loop(&xtensa_translator_ops, &dc.base, cpu, tb, max_insns);
1890
+
556
+ translator_loop(cpu, tb, max_insns, pc, host_pc,
1891
+/* Emits the `vsubwod.h.b vd, vj, vk` instruction. */
557
+ &xtensa_translator_ops, &dc.base);
1892
+static void __attribute__((unused))
558
}
1893
+tcg_out_opc_vsubwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
559
1894
+{
560
void xtensa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_H_B, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_W_H, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_D_W, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vsubwev.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vsubwev.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vsubwev.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vsubwev.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vsadd.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_B, vd, vj, vk));
+}
+
+/* Emits the `vsadd.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_H, vd, vj, vk));
+}
+
+/* Emits the `vsadd.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_W, vd, vj, vk));
+}
+
+/* Emits the `vsadd.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_D, vd, vj, vk));
+}
+
+/* Emits the `vssub.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_B, vd, vj, vk));
+}
+
+/* Emits the `vssub.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_H, vd, vj, vk));
+}
+
+/* Emits the `vssub.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_W, vd, vj, vk));
+}
+
+/* Emits the `vssub.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_D, vd, vj, vk));
+}
+
+/* Emits the `vsadd.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_BU, vd, vj, vk));
+}
+
+/* Emits the `vsadd.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_HU, vd, vj, vk));
+}
+
+/* Emits the `vsadd.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_WU, vd, vj, vk));
+}
+
+/* Emits the `vsadd.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_DU, vd, vj, vk));
+}
+
+/* Emits the `vssub.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_BU, vd, vj, vk));
+}
+
+/* Emits the `vssub.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_HU, vd, vj, vk));
+}
+
+/* Emits the `vssub.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_WU, vd, vj, vk));
+}
+
+/* Emits the `vssub.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_DU, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_H_B, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_W_H, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_D_W, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_H_B, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_W_H, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_D_W, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.hu.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_HU_BU, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.wu.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_WU_HU, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.du.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_du_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_DU_WU, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.qu.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_qu_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_QU_DU, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.hu.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_HU_BU, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.wu.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_WU_HU, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.du.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_du_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_DU_WU, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.qu.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_qu_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_QU_DU, vd, vj, vk));
+}
+
+/* Emits the `vadda.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadda_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_B, vd, vj, vk));
+}
+
+/* Emits the `vadda.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadda_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_H, vd, vj, vk));
+}
+
+/* Emits the `vadda.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadda_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_W, vd, vj, vk));
+}
+
+/* Emits the `vadda.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadda_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_D, vd, vj, vk));
+}
+
+/* Emits the `vabsd.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_B, vd, vj, vk));
+}
+
+/* Emits the `vabsd.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_H, vd, vj, vk));
+}
+
+/* Emits the `vabsd.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_W, vd, vj, vk));
+}
+
+/* Emits the `vabsd.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_D, vd, vj, vk));
+}
+
+/* Emits the `vabsd.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_BU, vd, vj, vk));
+}
+
+/* Emits the `vabsd.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_HU, vd, vj, vk));
+}
+
+/* Emits the `vabsd.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_WU, vd, vj, vk));
+}
+
+/* Emits the `vabsd.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_DU, vd, vj, vk));
+}
+
+/* Emits the `vavg.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_B, vd, vj, vk));
+}
+
+/* Emits the `vavg.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_H, vd, vj, vk));
+}
+
+/* Emits the `vavg.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_W, vd, vj, vk));
+}
+
+/* Emits the `vavg.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_D, vd, vj, vk));
+}
+
+/* Emits the `vavg.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_BU, vd, vj, vk));
+}
+
+/* Emits the `vavg.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_HU, vd, vj, vk));
+}
+
+/* Emits the `vavg.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_WU, vd, vj, vk));
+}
+
+/* Emits the `vavg.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_DU, vd, vj, vk));
+}
+
+/* Emits the `vavgr.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_B, vd, vj, vk));
+}
+
+/* Emits the `vavgr.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_H, vd, vj, vk));
+}
+
+/* Emits the `vavgr.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_W, vd, vj, vk));
+}
+
+/* Emits the `vavgr.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_D, vd, vj, vk));
+}
+
+/* Emits the `vavgr.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_BU, vd, vj, vk));
+}
+
+/* Emits the `vavgr.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_HU, vd, vj, vk));
+}
+
+/* Emits the `vavgr.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_WU, vd, vj, vk));
+}
+
+/* Emits the `vavgr.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_DU, vd, vj, vk));
+}
+
+/* Emits the `vmax.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_B, vd, vj, vk));
+}
+
+/* Emits the `vmax.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_H, vd, vj, vk));
+}
+
+/* Emits the `vmax.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_W, vd, vj, vk));
+}
+
+/* Emits the `vmax.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_D, vd, vj, vk));
+}
+
+/* Emits the `vmin.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_B, vd, vj, vk));
+}
+
+/* Emits the `vmin.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_H, vd, vj, vk));
+}
+
+/* Emits the `vmin.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_W, vd, vj, vk));
+}
+
+/* Emits the `vmin.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_D, vd, vj, vk));
+}
+
+/* Emits the `vmax.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_BU, vd, vj, vk));
+}
+
+/* Emits the `vmax.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_HU, vd, vj, vk));
+}
+
+/* Emits the `vmax.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_WU, vd, vj, vk));
+}
+
+/* Emits the `vmax.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_DU, vd, vj, vk));
+}
+
+/* Emits the `vmin.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_BU, vd, vj, vk));
+}
+
+/* Emits the `vmin.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_HU, vd, vj, vk));
+}
+
+/* Emits the `vmin.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_WU, vd, vj, vk));
+}
+
+/* Emits the `vmin.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_DU, vd, vj, vk));
+}
+
+/* Emits the `vmul.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmul_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_B, vd, vj, vk));
+}
+
+/* Emits the `vmul.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmul_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_H, vd, vj, vk));
+}
+
+/* Emits the `vmul.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmul_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_W, vd, vj, vk));
+}
+
+/* Emits the `vmul.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmul_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_D, vd, vj, vk));
+}
+
+/* Emits the `vmuh.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_B, vd, vj, vk));
+}
+
+/* Emits the `vmuh.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_H, vd, vj, vk));
+}
+
+/* Emits the `vmuh.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_W, vd, vj, vk));
+}
+
+/* Emits the `vmuh.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_D, vd, vj, vk));
+}
+
+/* Emits the `vmuh.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_BU, vd, vj, vk));
+}
+
+/* Emits the `vmuh.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_HU, vd, vj, vk));
+}
+
+/* Emits the `vmuh.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_WU, vd, vj, vk));
+}
+
+/* Emits the `vmuh.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_DU, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_H_B, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_W_H, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_D_W, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_H_B, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_W_H, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_D_W, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vmadd.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmadd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_B, vd, vj, vk));
+}
+
+/* Emits the `vmadd.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmadd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_H, vd, vj, vk));
+}
+
+/* Emits the `vmadd.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmadd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_W, vd, vj, vk));
+}
+
+/* Emits the `vmadd.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_D, vd, vj, vk));
+}
+
+/* Emits the `vmsub.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmsub_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_B, vd, vj, vk));
+}
+
+/* Emits the `vmsub.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmsub_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_H, vd, vj, vk));
+}
+
+/* Emits the `vmsub.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmsub_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_W, vd, vj, vk));
+}
+
+/* Emits the `vmsub.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_D, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_H_B, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_W_H, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_D_W, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_H_B, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_W_H, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_D_W, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vdiv.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_B, vd, vj, vk));
+}
+
+/* Emits the `vdiv.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_H, vd, vj, vk));
+}
+
+/* Emits the `vdiv.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_W, vd, vj, vk));
+}
+
+/* Emits the `vdiv.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_D, vd, vj, vk));
+}
+
+/* Emits the `vmod.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_B, vd, vj, vk));
+}
+
+/* Emits the `vmod.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_H, vd, vj, vk));
+}
+
+/* Emits the `vmod.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_W, vd, vj, vk));
+}
+
+/* Emits the `vmod.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_D, vd, vj, vk));
+}
+
+/* Emits the `vdiv.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_BU, vd, vj, vk));
+}
+
+/* Emits the `vdiv.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_HU, vd, vj, vk));
+}
+
+/* Emits the `vdiv.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_WU, vd, vj, vk));
+}
+
+/* Emits the `vdiv.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_DU, vd, vj, vk));
+}
+
+/* Emits the `vmod.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_BU, vd, vj, vk));
+}
+
+/* Emits the `vmod.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_HU, vd, vj, vk));
+}
+
+/* Emits the `vmod.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_WU, vd, vj, vk));
+}
+
+/* Emits the `vmod.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_DU, vd, vj, vk));
+}
+
+/* Emits the `vsll.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsll_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_B, vd, vj, vk));
+}
+
+/* Emits the `vsll.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsll_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_H, vd, vj, vk));
+}
+
+/* Emits the `vsll.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsll_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_W, vd, vj, vk));
+}
+
+/* Emits the `vsll.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsll_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_D, vd, vj, vk));
+}
+
+/* Emits the `vsrl.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrl_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_B, vd, vj, vk));
+}
+
+/* Emits the `vsrl.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrl_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_H, vd, vj, vk));
+}
+
+/* Emits the `vsrl.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrl_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_W, vd, vj, vk));
+}
+
+/* Emits the `vsrl.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrl_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_D, vd, vj, vk));
+}
+
+/* Emits the `vsra.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsra_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_B, vd, vj, vk));
3268
+}
3269
+
3270
+/* Emits the `vsra.h vd, vj, vk` instruction. */
3271
+static void __attribute__((unused))
3272
+tcg_out_opc_vsra_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3273
+{
3274
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_H, vd, vj, vk));
3275
+}
3276
+
3277
+/* Emits the `vsra.w vd, vj, vk` instruction. */
3278
+static void __attribute__((unused))
3279
+tcg_out_opc_vsra_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3280
+{
3281
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_W, vd, vj, vk));
3282
+}
3283
+
3284
+/* Emits the `vsra.d vd, vj, vk` instruction. */
3285
+static void __attribute__((unused))
3286
+tcg_out_opc_vsra_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3287
+{
3288
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_D, vd, vj, vk));
3289
+}
3290
+
3291
+/* Emits the `vrotr.b vd, vj, vk` instruction. */
3292
+static void __attribute__((unused))
3293
+tcg_out_opc_vrotr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3294
+{
3295
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_B, vd, vj, vk));
3296
+}
3297
+
3298
+/* Emits the `vrotr.h vd, vj, vk` instruction. */
3299
+static void __attribute__((unused))
3300
+tcg_out_opc_vrotr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3301
+{
3302
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_H, vd, vj, vk));
3303
+}
3304
+
3305
+/* Emits the `vrotr.w vd, vj, vk` instruction. */
3306
+static void __attribute__((unused))
3307
+tcg_out_opc_vrotr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3308
+{
3309
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_W, vd, vj, vk));
3310
+}
3311
+
3312
+/* Emits the `vrotr.d vd, vj, vk` instruction. */
3313
+static void __attribute__((unused))
3314
+tcg_out_opc_vrotr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3315
+{
3316
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_D, vd, vj, vk));
3317
+}
3318
+
3319
+/* Emits the `vsrlr.b vd, vj, vk` instruction. */
3320
+static void __attribute__((unused))
3321
+tcg_out_opc_vsrlr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3322
+{
3323
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_B, vd, vj, vk));
3324
+}
3325
+
3326
+/* Emits the `vsrlr.h vd, vj, vk` instruction. */
3327
+static void __attribute__((unused))
3328
+tcg_out_opc_vsrlr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3329
+{
3330
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_H, vd, vj, vk));
3331
+}
3332
+
3333
+/* Emits the `vsrlr.w vd, vj, vk` instruction. */
3334
+static void __attribute__((unused))
3335
+tcg_out_opc_vsrlr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3336
+{
3337
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_W, vd, vj, vk));
3338
+}
3339
+
3340
+/* Emits the `vsrlr.d vd, vj, vk` instruction. */
3341
+static void __attribute__((unused))
3342
+tcg_out_opc_vsrlr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3343
+{
3344
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_D, vd, vj, vk));
3345
+}
3346
+
3347
+/* Emits the `vsrar.b vd, vj, vk` instruction. */
3348
+static void __attribute__((unused))
3349
+tcg_out_opc_vsrar_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3350
+{
3351
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_B, vd, vj, vk));
3352
+}
3353
+
3354
+/* Emits the `vsrar.h vd, vj, vk` instruction. */
3355
+static void __attribute__((unused))
3356
+tcg_out_opc_vsrar_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3357
+{
3358
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_H, vd, vj, vk));
3359
+}
3360
+
3361
+/* Emits the `vsrar.w vd, vj, vk` instruction. */
3362
+static void __attribute__((unused))
3363
+tcg_out_opc_vsrar_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3364
+{
3365
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_W, vd, vj, vk));
3366
+}
3367
+
3368
+/* Emits the `vsrar.d vd, vj, vk` instruction. */
3369
+static void __attribute__((unused))
3370
+tcg_out_opc_vsrar_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3371
+{
3372
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_D, vd, vj, vk));
3373
+}
3374
+
3375
+/* Emits the `vsrln.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrln_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vsrln.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrln_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vsrln.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrln_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vsran.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsran_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vsran.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsran_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vsran.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsran_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vsrlrn.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLRN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vsrlrn.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLRN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vsrlrn.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLRN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vsrarn.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRARN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vsrarn.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRARN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vsrarn.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRARN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vssrln.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrln_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vssrln.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrln_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vssrln.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrln_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vssran.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssran_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vssran.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssran_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vssran.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssran_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vssrlrn.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vssrlrn.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vssrlrn.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vssrarn.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vssrarn.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vssrarn.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vssrln.bu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrln_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_BU_H, vd, vj, vk));
+}
+
+/* Emits the `vssrln.hu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrln_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_HU_W, vd, vj, vk));
+}
+
+/* Emits the `vssrln.wu.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrln_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_WU_D, vd, vj, vk));
+}
+
+/* Emits the `vssran.bu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssran_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_BU_H, vd, vj, vk));
+}
+
+/* Emits the `vssran.hu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssran_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_HU_W, vd, vj, vk));
+}
+
+/* Emits the `vssran.wu.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssran_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_WU_D, vd, vj, vk));
+}
+
+/* Emits the `vssrlrn.bu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrn_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_BU_H, vd, vj, vk));
+}
+
+/* Emits the `vssrlrn.hu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrn_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_HU_W, vd, vj, vk));
+}
+
+/* Emits the `vssrlrn.wu.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrn_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_WU_D, vd, vj, vk));
+}
+
+/* Emits the `vssrarn.bu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarn_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_BU_H, vd, vj, vk));
+}
+
+/* Emits the `vssrarn.hu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarn_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_HU_W, vd, vj, vk));
+}
+
+/* Emits the `vssrarn.wu.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarn_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_WU_D, vd, vj, vk));
+}
+
+/* Emits the `vbitclr.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_B, vd, vj, vk));
+}
+
+/* Emits the `vbitclr.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_H, vd, vj, vk));
+}
+
+/* Emits the `vbitclr.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_W, vd, vj, vk));
+}
+
+/* Emits the `vbitclr.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_D, vd, vj, vk));
+}
+
+/* Emits the `vbitset.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitset_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_B, vd, vj, vk));
+}
+
+/* Emits the `vbitset.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitset_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_H, vd, vj, vk));
+}
+
+/* Emits the `vbitset.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitset_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_W, vd, vj, vk));
+}
+
+/* Emits the `vbitset.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitset_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_D, vd, vj, vk));
+}
+
+/* Emits the `vbitrev.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrev_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_B, vd, vj, vk));
+}
+
+/* Emits the `vbitrev.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrev_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_H, vd, vj, vk));
+}
+
+/* Emits the `vbitrev.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrev_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_W, vd, vj, vk));
+}
+
+/* Emits the `vbitrev.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrev_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_D, vd, vj, vk));
+}
+
+/* Emits the `vpackev.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackev_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_B, vd, vj, vk));
+}
+
+/* Emits the `vpackev.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackev_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_H, vd, vj, vk));
+}
+
+/* Emits the `vpackev.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackev_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_W, vd, vj, vk));
+}
+
+/* Emits the `vpackev.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackev_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_D, vd, vj, vk));
+}
+
+/* Emits the `vpackod.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackod_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_B, vd, vj, vk));
+}
+
+/* Emits the `vpackod.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackod_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_H, vd, vj, vk));
+}
+
+/* Emits the `vpackod.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackod_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_W, vd, vj, vk));
+}
+
+/* Emits the `vpackod.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpackod_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_D, vd, vj, vk));
+}
+
+/* Emits the `vilvl.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvl_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_B, vd, vj, vk));
+}
+
+/* Emits the `vilvl.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvl_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_H, vd, vj, vk));
+}
+
+/* Emits the `vilvl.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvl_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_W, vd, vj, vk));
+}
+
+/* Emits the `vilvl.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvl_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_D, vd, vj, vk));
+}
+
+/* Emits the `vilvh.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvh_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_B, vd, vj, vk));
+}
+
+/* Emits the `vilvh.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvh_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_H, vd, vj, vk));
+}
+
+/* Emits the `vilvh.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvh_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_W, vd, vj, vk));
+}
+
+/* Emits the `vilvh.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vilvh_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_D, vd, vj, vk));
+}
+
+/* Emits the `vpickev.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickev_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_B, vd, vj, vk));
+}
+
+/* Emits the `vpickev.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickev_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_H, vd, vj, vk));
+}
+
+/* Emits the `vpickev.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickev_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_W, vd, vj, vk));
+}
+
+/* Emits the `vpickev.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickev_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_D, vd, vj, vk));
+}
+
+/* Emits the `vpickod.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickod_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_B, vd, vj, vk));
+}
+
+/* Emits the `vpickod.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickod_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_H, vd, vj, vk));
+}
+
+/* Emits the `vpickod.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickod_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_W, vd, vj, vk));
+}
+
+/* Emits the `vpickod.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickod_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_D, vd, vj, vk));
+}
+
+/* Emits the `vreplve.b vd, vj, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplve_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
+{
+    tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_B, vd, vj, k));
+}
+
+/* Emits the `vreplve.h vd, vj, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplve_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
+{
+    tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_H, vd, vj, k));
+}
+
+/* Emits the `vreplve.w vd, vj, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplve_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
+{
+    tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_W, vd, vj, k));
+}
+
+/* Emits the `vreplve.d vd, vj, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplve_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
+{
+    tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_D, vd, vj, k));
+}
+
+/* Emits the `vand.v vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vand_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAND_V, vd, vj, vk));
+}
+
+/* Emits the `vor.v vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vor_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VOR_V, vd, vj, vk));
+}
+
+/* Emits the `vxor.v vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vxor_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VXOR_V, vd, vj, vk));
+}
+
+/* Emits the `vnor.v vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vnor_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VNOR_V, vd, vj, vk));
+}
+
+/* Emits the `vandn.v vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vandn_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VANDN_V, vd, vj, vk));
+}
+
+/* Emits the `vorn.v vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vorn_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VORN_V, vd, vj, vk));
+}
+
+/* Emits the `vfrstp.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrstp_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFRSTP_B, vd, vj, vk));
+}
+
+/* Emits the `vfrstp.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrstp_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFRSTP_H, vd, vj, vk));
+}
+
+/* Emits the `vadd.q vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadd_q(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_Q, vd, vj, vk));
+}
+
+/* Emits the `vsub.q vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsub_q(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_Q, vd, vj, vk));
+}
+
+/* Emits the `vsigncov.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsigncov_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_B, vd, vj, vk));
+}
+
+/* Emits the `vsigncov.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsigncov_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_H, vd, vj, vk));
+}
+
+/* Emits the `vsigncov.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsigncov_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_W, vd, vj, vk));
+}
+
+/* Emits the `vsigncov.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsigncov_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_D, vd, vj, vk));
+}
+
+/* Emits the `vfadd.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfadd_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFADD_S, vd, vj, vk));
+}
+
+/* Emits the `vfadd.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFADD_D, vd, vj, vk));
+}
+
+/* Emits the `vfsub.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfsub_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFSUB_S, vd, vj, vk));
+}
+
+/* Emits the `vfsub.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFSUB_D, vd, vj, vk));
+}
+
+/* Emits the `vfmul.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmul_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFMUL_S, vd, vj, vk));
+}
+
+/* Emits the `vfmul.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmul_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFMUL_D, vd, vj, vk));
+}
+
+/* Emits the `vfdiv.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfdiv_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFDIV_S, vd, vj, vk));
+}
+
+/* Emits the `vfdiv.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfdiv_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFDIV_D, vd, vj, vk));
+}
+
+/* Emits the `vfmax.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmax_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAX_S, vd, vj, vk));
+}
+
+/* Emits the `vfmax.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmax_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAX_D, vd, vj, vk));
+}
+
+/* Emits the `vfmin.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmin_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFMIN_S, vd, vj, vk));
+}
+
+/* Emits the `vfmin.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmin_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFMIN_D, vd, vj, vk));
+}
+
+/* Emits the `vfmaxa.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmaxa_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAXA_S, vd, vj, vk));
+}
+
+/* Emits the `vfmaxa.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmaxa_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAXA_D, vd, vj, vk));
+}
+
+/* Emits the `vfmina.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmina_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFMINA_S, vd, vj, vk));
+}
+
+/* Emits the `vfmina.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmina_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFMINA_D, vd, vj, vk));
+}
+
+/* Emits the `vfcvt.h.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcvt_h_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCVT_H_S, vd, vj, vk));
+}
+
+/* Emits the `vfcvt.s.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcvt_s_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCVT_S_D, vd, vj, vk));
+}
+
+/* Emits the `vffint.s.l vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vffint_s_l(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFFINT_S_L, vd, vj, vk));
+}
+
+/* Emits the `vftint.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftint_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINT_W_D, vd, vj, vk));
+}
+
+/* Emits the `vftintrm.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrm_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRM_W_D, vd, vj, vk));
+}
+
+/* Emits the `vftintrp.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrp_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRP_W_D, vd, vj, vk));
+}
+
+/* Emits the `vftintrz.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrz_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRZ_W_D, vd, vj, vk));
+}
+
+/* Emits the `vftintrne.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrne_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRNE_W_D, vd, vj, vk));
+}
+
+/* Emits the `vshuf.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSHUF_H, vd, vj, vk));
+}
+
+/* Emits the `vshuf.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSHUF_W, vd, vj, vk));
+}
+
+/* Emits the `vshuf.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSHUF_D, vd, vj, vk));
+}
+
+/* Emits the `vseqi.b vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vseqi_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_B, vd, vj, sk5));
+}
+
+/* Emits the `vseqi.h vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vseqi_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_H, vd, vj, sk5));
+}
+
+/* Emits the `vseqi.w vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vseqi_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_W, vd, vj, sk5));
+}
+
+/* Emits the `vseqi.d vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vseqi_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_D, vd, vj, sk5));
+}
+
+/* Emits the `vslei.b vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_B, vd, vj, sk5));
+}
+
+/* Emits the `vslei.h vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_H, vd, vj, sk5));
+}
+
+/* Emits the `vslei.w vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_W, vd, vj, sk5));
+}
+
+/* Emits the `vslei.d vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_D, vd, vj, sk5));
+}
+
+/* Emits the `vslei.bu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_BU, vd, vj, uk5));
+}
+
+/* Emits the `vslei.hu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_HU, vd, vj, uk5));
+}
+
+/* Emits the `vslei.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_WU, vd, vj, uk5));
+}
+
+/* Emits the `vslei.du vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslei_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_DU, vd, vj, uk5));
+}
+
+/* Emits the `vslti.b vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_B, vd, vj, sk5));
+}
+
+/* Emits the `vslti.h vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_H, vd, vj, sk5));
+}
+
+/* Emits the `vslti.w vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_W, vd, vj, sk5));
+}
+
+/* Emits the `vslti.d vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_D, vd, vj, sk5));
+}
+
+/* Emits the `vslti.bu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_BU, vd, vj, uk5));
+}
+
+/* Emits the `vslti.hu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_HU, vd, vj, uk5));
+}
+
+/* Emits the `vslti.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_WU, vd, vj, uk5));
+}
+
+/* Emits the `vslti.du vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslti_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_DU, vd, vj, uk5));
+}
+
+/* Emits the `vaddi.bu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddi_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_BU, vd, vj, uk5));
+}
+
+/* Emits the `vaddi.hu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddi_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_HU, vd, vj, uk5));
+}
+
+/* Emits the `vaddi.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddi_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_WU, vd, vj, uk5));
+}
+
+/* Emits the `vaddi.du vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddi_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_DU, vd, vj, uk5));
+}
+
+/* Emits the `vsubi.bu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubi_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_BU, vd, vj, uk5));
+}
+
+/* Emits the `vsubi.hu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubi_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_HU, vd, vj, uk5));
+}
+
+/* Emits the `vsubi.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubi_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_WU, vd, vj, uk5));
+}
+
+/* Emits the `vsubi.du vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubi_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_DU, vd, vj, uk5));
+}
+
+/* Emits the `vbsll.v vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbsll_v(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VBSLL_V, vd, vj, uk5));
+}
+
+/* Emits the `vbsrl.v vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbsrl_v(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VBSRL_V, vd, vj, uk5));
+}
+
+/* Emits the `vmaxi.b vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_B, vd, vj, sk5));
+}
+
+/* Emits the `vmaxi.h vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_H, vd, vj, sk5));
+}
+
+/* Emits the `vmaxi.w vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_W, vd, vj, sk5));
+}
+
+/* Emits the `vmaxi.d vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_D, vd, vj, sk5));
+}
+
+/* Emits the `vmini.b vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_B, vd, vj, sk5));
+}
+
+/* Emits the `vmini.h vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_H, vd, vj, sk5));
+}
+
+/* Emits the `vmini.w vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_W, vd, vj, sk5));
+}
+
+/* Emits the `vmini.d vd, vj, sk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_D, vd, vj, sk5));
+}
+
+/* Emits the `vmaxi.bu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_BU, vd, vj, uk5));
+}
+
+/* Emits the `vmaxi.hu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_HU, vd, vj, uk5));
+}
+
+/* Emits the `vmaxi.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_WU, vd, vj, uk5));
+}
+
+/* Emits the `vmaxi.du vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaxi_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_DU, vd, vj, uk5));
+}
+
+/* Emits the `vmini.bu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_BU, vd, vj, uk5));
+}
+
+/* Emits the `vmini.hu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_HU, vd, vj, uk5));
+}
+
+/* Emits the `vmini.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_WU, vd, vj, uk5));
+}
+
+/* Emits the `vmini.du vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmini_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_DU, vd, vj, uk5));
+}
+
+/* Emits the `vfrstpi.b vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrstpi_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VFRSTPI_B, vd, vj, uk5));
+}
+
+/* Emits the `vfrstpi.h vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfrstpi_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VFRSTPI_H, vd, vj, uk5));
+}
+
+/* Emits the `vclo.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclo_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VCLO_B, vd, vj));
+}
+
+/* Emits the `vclo.h vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclo_h(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VCLO_H, vd, vj));
+}
+
+/* Emits the `vclo.w vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclo_w(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VCLO_W, vd, vj));
+}
+
+/* Emits the `vclo.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclo_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VCLO_D, vd, vj));
+}
+
+/* Emits the `vclz.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclz_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_B, vd, vj));
+}
+
+/* Emits the `vclz.h vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclz_h(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_H, vd, vj));
+}
+
+/* Emits the `vclz.w vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclz_w(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_W, vd, vj));
+}
+
+/* Emits the `vclz.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vclz_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_D, vd, vj));
+}
+
+/* Emits the `vpcnt.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpcnt_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_B, vd, vj));
+}
+
+/* Emits the `vpcnt.h vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpcnt_h(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_H, vd, vj));
+}
+
+/* Emits the `vpcnt.w vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpcnt_w(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_W, vd, vj));
+}
+
+/* Emits the `vpcnt.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpcnt_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_D, vd, vj));
+}
+
+/* Emits the `vneg.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vneg_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VNEG_B, vd, vj));
+}
+
+/* Emits the `vneg.h vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vneg_h(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VNEG_H, vd, vj));
+}
+
+/* Emits the `vneg.w vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vneg_w(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VNEG_W, vd, vj));
+}
+
+/* Emits the `vneg.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vneg_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VNEG_D, vd, vj));
+}
+
+/* Emits the `vmskltz.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmskltz_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_B, vd, vj));
+}
+
+/* Emits the `vmskltz.h vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmskltz_h(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_H, vd, vj));
+}
+
+/* Emits the `vmskltz.w vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmskltz_w(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_W, vd, vj));
+}
+
+/* Emits the `vmskltz.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmskltz_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_D, vd, vj));
+}
+
+/* Emits the `vmskgez.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmskgez_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VMSKGEZ_B, vd, vj));
+}
+
+/* Emits the `vmsknz.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmsknz_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VMSKNZ_B, vd, vj));
+}
+
+/* Emits the `vseteqz.v cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vseteqz_v(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+    tcg_out32(s, encode_cdvj_insn(OPC_VSETEQZ_V, cd, vj));
+}
+
+/* Emits the `vsetnez.v cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetnez_v(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+    tcg_out32(s, encode_cdvj_insn(OPC_VSETNEZ_V, cd, vj));
+}
+
+/* Emits the `vsetanyeqz.b cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetanyeqz_b(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+    tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_B, cd, vj));
+}
+
+/* Emits the `vsetanyeqz.h cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetanyeqz_h(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+    tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_H, cd, vj));
+}
+
+/* Emits the `vsetanyeqz.w cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetanyeqz_w(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+    tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_W, cd, vj));
+}
+
+/* Emits the `vsetanyeqz.d cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetanyeqz_d(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+    tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_D, cd, vj));
+}
+
+/* Emits the `vsetallnez.b cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetallnez_b(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+    tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_B, cd, vj));
+}
+
+/* Emits the `vsetallnez.h cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetallnez_h(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+    tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_H, cd, vj));
+}
+
+/* Emits the `vsetallnez.w cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetallnez_w(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+    tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_W, cd, vj));
+}
+
+/* Emits the `vsetallnez.d cd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsetallnez_d(TCGContext *s, TCGReg cd, TCGReg vj)
+{
+    tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_D, cd, vj));
+}
+
+/* Emits the `vflogb.s vd, vj` instruction. */
4755
+static void __attribute__((unused))
4756
+tcg_out_opc_vflogb_s(TCGContext *s, TCGReg vd, TCGReg vj)
4757
+{
4758
+ tcg_out32(s, encode_vdvj_insn(OPC_VFLOGB_S, vd, vj));
4759
+}
4760
+
4761
+/* Emits the `vflogb.d vd, vj` instruction. */
4762
+static void __attribute__((unused))
4763
+tcg_out_opc_vflogb_d(TCGContext *s, TCGReg vd, TCGReg vj)
4764
+{
4765
+ tcg_out32(s, encode_vdvj_insn(OPC_VFLOGB_D, vd, vj));
4766
+}
4767
+
4768
+/* Emits the `vfclass.s vd, vj` instruction. */
4769
+static void __attribute__((unused))
4770
+tcg_out_opc_vfclass_s(TCGContext *s, TCGReg vd, TCGReg vj)
4771
+{
4772
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCLASS_S, vd, vj));
4773
+}
4774
+
4775
+/* Emits the `vfclass.d vd, vj` instruction. */
4776
+static void __attribute__((unused))
4777
+tcg_out_opc_vfclass_d(TCGContext *s, TCGReg vd, TCGReg vj)
4778
+{
4779
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCLASS_D, vd, vj));
4780
+}
4781
+
4782
+/* Emits the `vfsqrt.s vd, vj` instruction. */
4783
+static void __attribute__((unused))
4784
+tcg_out_opc_vfsqrt_s(TCGContext *s, TCGReg vd, TCGReg vj)
4785
+{
4786
+ tcg_out32(s, encode_vdvj_insn(OPC_VFSQRT_S, vd, vj));
4787
+}
4788
+
4789
+/* Emits the `vfsqrt.d vd, vj` instruction. */
4790
+static void __attribute__((unused))
4791
+tcg_out_opc_vfsqrt_d(TCGContext *s, TCGReg vd, TCGReg vj)
4792
+{
4793
+ tcg_out32(s, encode_vdvj_insn(OPC_VFSQRT_D, vd, vj));
4794
+}
4795
+
4796
+/* Emits the `vfrecip.s vd, vj` instruction. */
4797
+static void __attribute__((unused))
4798
+tcg_out_opc_vfrecip_s(TCGContext *s, TCGReg vd, TCGReg vj)
4799
+{
4800
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRECIP_S, vd, vj));
4801
+}
4802
+
4803
+/* Emits the `vfrecip.d vd, vj` instruction. */
4804
+static void __attribute__((unused))
4805
+tcg_out_opc_vfrecip_d(TCGContext *s, TCGReg vd, TCGReg vj)
4806
+{
4807
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRECIP_D, vd, vj));
4808
+}
4809
+
4810
+/* Emits the `vfrsqrt.s vd, vj` instruction. */
4811
+static void __attribute__((unused))
4812
+tcg_out_opc_vfrsqrt_s(TCGContext *s, TCGReg vd, TCGReg vj)
4813
+{
4814
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRSQRT_S, vd, vj));
4815
+}
4816
+
4817
+/* Emits the `vfrsqrt.d vd, vj` instruction. */
4818
+static void __attribute__((unused))
4819
+tcg_out_opc_vfrsqrt_d(TCGContext *s, TCGReg vd, TCGReg vj)
4820
+{
4821
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRSQRT_D, vd, vj));
4822
+}
4823
+
4824
+/* Emits the `vfrint.s vd, vj` instruction. */
4825
+static void __attribute__((unused))
4826
+tcg_out_opc_vfrint_s(TCGContext *s, TCGReg vd, TCGReg vj)
4827
+{
4828
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINT_S, vd, vj));
4829
+}
4830
+
4831
+/* Emits the `vfrint.d vd, vj` instruction. */
4832
+static void __attribute__((unused))
4833
+tcg_out_opc_vfrint_d(TCGContext *s, TCGReg vd, TCGReg vj)
4834
+{
4835
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINT_D, vd, vj));
4836
+}
4837
+
4838
+/* Emits the `vfrintrm.s vd, vj` instruction. */
4839
+static void __attribute__((unused))
4840
+tcg_out_opc_vfrintrm_s(TCGContext *s, TCGReg vd, TCGReg vj)
4841
+{
4842
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRM_S, vd, vj));
4843
+}
4844
+
4845
+/* Emits the `vfrintrm.d vd, vj` instruction. */
4846
+static void __attribute__((unused))
4847
+tcg_out_opc_vfrintrm_d(TCGContext *s, TCGReg vd, TCGReg vj)
4848
+{
4849
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRM_D, vd, vj));
4850
+}
4851
+
4852
+/* Emits the `vfrintrp.s vd, vj` instruction. */
4853
+static void __attribute__((unused))
4854
+tcg_out_opc_vfrintrp_s(TCGContext *s, TCGReg vd, TCGReg vj)
4855
+{
4856
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRP_S, vd, vj));
4857
+}
4858
+
4859
+/* Emits the `vfrintrp.d vd, vj` instruction. */
4860
+static void __attribute__((unused))
4861
+tcg_out_opc_vfrintrp_d(TCGContext *s, TCGReg vd, TCGReg vj)
4862
+{
4863
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRP_D, vd, vj));
4864
+}
4865
+
4866
+/* Emits the `vfrintrz.s vd, vj` instruction. */
4867
+static void __attribute__((unused))
4868
+tcg_out_opc_vfrintrz_s(TCGContext *s, TCGReg vd, TCGReg vj)
4869
+{
4870
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRZ_S, vd, vj));
4871
+}
4872
+
4873
+/* Emits the `vfrintrz.d vd, vj` instruction. */
4874
+static void __attribute__((unused))
4875
+tcg_out_opc_vfrintrz_d(TCGContext *s, TCGReg vd, TCGReg vj)
4876
+{
4877
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRZ_D, vd, vj));
4878
+}
4879
+
4880
+/* Emits the `vfrintrne.s vd, vj` instruction. */
4881
+static void __attribute__((unused))
4882
+tcg_out_opc_vfrintrne_s(TCGContext *s, TCGReg vd, TCGReg vj)
4883
+{
4884
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRNE_S, vd, vj));
4885
+}
4886
+
4887
+/* Emits the `vfrintrne.d vd, vj` instruction. */
4888
+static void __attribute__((unused))
4889
+tcg_out_opc_vfrintrne_d(TCGContext *s, TCGReg vd, TCGReg vj)
4890
+{
4891
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRNE_D, vd, vj));
4892
+}
4893
+
4894
+/* Emits the `vfcvtl.s.h vd, vj` instruction. */
4895
+static void __attribute__((unused))
4896
+tcg_out_opc_vfcvtl_s_h(TCGContext *s, TCGReg vd, TCGReg vj)
4897
+{
4898
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTL_S_H, vd, vj));
4899
+}
4900
+
4901
+/* Emits the `vfcvth.s.h vd, vj` instruction. */
4902
+static void __attribute__((unused))
4903
+tcg_out_opc_vfcvth_s_h(TCGContext *s, TCGReg vd, TCGReg vj)
4904
+{
4905
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTH_S_H, vd, vj));
4906
+}
4907
+
4908
+/* Emits the `vfcvtl.d.s vd, vj` instruction. */
4909
+static void __attribute__((unused))
4910
+tcg_out_opc_vfcvtl_d_s(TCGContext *s, TCGReg vd, TCGReg vj)
4911
+{
4912
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTL_D_S, vd, vj));
4913
+}
4914
+
4915
+/* Emits the `vfcvth.d.s vd, vj` instruction. */
4916
+static void __attribute__((unused))
4917
+tcg_out_opc_vfcvth_d_s(TCGContext *s, TCGReg vd, TCGReg vj)
4918
+{
4919
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTH_D_S, vd, vj));
4920
+}
4921
+
4922
+/* Emits the `vffint.s.w vd, vj` instruction. */
4923
+static void __attribute__((unused))
4924
+tcg_out_opc_vffint_s_w(TCGContext *s, TCGReg vd, TCGReg vj)
4925
+{
4926
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_S_W, vd, vj));
4927
+}
4928
+
4929
+/* Emits the `vffint.s.wu vd, vj` instruction. */
4930
+static void __attribute__((unused))
4931
+tcg_out_opc_vffint_s_wu(TCGContext *s, TCGReg vd, TCGReg vj)
4932
+{
4933
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_S_WU, vd, vj));
4934
+}
4935
+
4936
+/* Emits the `vffint.d.l vd, vj` instruction. */
4937
+static void __attribute__((unused))
4938
+tcg_out_opc_vffint_d_l(TCGContext *s, TCGReg vd, TCGReg vj)
4939
+{
4940
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_D_L, vd, vj));
4941
+}
4942
+
4943
+/* Emits the `vffint.d.lu vd, vj` instruction. */
4944
+static void __attribute__((unused))
4945
+tcg_out_opc_vffint_d_lu(TCGContext *s, TCGReg vd, TCGReg vj)
4946
+{
4947
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_D_LU, vd, vj));
4948
+}
4949
+
4950
+/* Emits the `vffintl.d.w vd, vj` instruction. */
4951
+static void __attribute__((unused))
4952
+tcg_out_opc_vffintl_d_w(TCGContext *s, TCGReg vd, TCGReg vj)
4953
+{
4954
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINTL_D_W, vd, vj));
4955
+}
4956
+
4957
+/* Emits the `vffinth.d.w vd, vj` instruction. */
4958
+static void __attribute__((unused))
4959
+tcg_out_opc_vffinth_d_w(TCGContext *s, TCGReg vd, TCGReg vj)
4960
+{
4961
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINTH_D_W, vd, vj));
4962
+}
4963
+
4964
+/* Emits the `vftint.w.s vd, vj` instruction. */
4965
+static void __attribute__((unused))
4966
+tcg_out_opc_vftint_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
4967
+{
4968
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_W_S, vd, vj));
4969
+}
4970
+
4971
+/* Emits the `vftint.l.d vd, vj` instruction. */
4972
+static void __attribute__((unused))
4973
+tcg_out_opc_vftint_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
4974
+{
4975
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_L_D, vd, vj));
4976
+}
4977
+
4978
+/* Emits the `vftintrm.w.s vd, vj` instruction. */
4979
+static void __attribute__((unused))
4980
+tcg_out_opc_vftintrm_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
4981
+{
4982
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRM_W_S, vd, vj));
4983
+}
4984
+
4985
+/* Emits the `vftintrm.l.d vd, vj` instruction. */
4986
+static void __attribute__((unused))
4987
+tcg_out_opc_vftintrm_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
4988
+{
4989
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRM_L_D, vd, vj));
4990
+}
4991
+
4992
+/* Emits the `vftintrp.w.s vd, vj` instruction. */
4993
+static void __attribute__((unused))
4994
+tcg_out_opc_vftintrp_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
4995
+{
4996
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRP_W_S, vd, vj));
4997
+}
4998
+
4999
+/* Emits the `vftintrp.l.d vd, vj` instruction. */
5000
+static void __attribute__((unused))
5001
+tcg_out_opc_vftintrp_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
5002
+{
5003
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRP_L_D, vd, vj));
5004
+}
5005
+
5006
+/* Emits the `vftintrz.w.s vd, vj` instruction. */
5007
+static void __attribute__((unused))
5008
+tcg_out_opc_vftintrz_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
5009
+{
5010
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_W_S, vd, vj));
5011
+}
5012
+
5013
+/* Emits the `vftintrz.l.d vd, vj` instruction. */
5014
+static void __attribute__((unused))
5015
+tcg_out_opc_vftintrz_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
5016
+{
5017
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_L_D, vd, vj));
5018
+}
5019
+
5020
+/* Emits the `vftintrne.w.s vd, vj` instruction. */
5021
+static void __attribute__((unused))
5022
+tcg_out_opc_vftintrne_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
5023
+{
5024
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNE_W_S, vd, vj));
5025
+}
5026
+
5027
+/* Emits the `vftintrne.l.d vd, vj` instruction. */
5028
+static void __attribute__((unused))
5029
+tcg_out_opc_vftintrne_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
5030
+{
5031
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNE_L_D, vd, vj));
5032
+}
5033
+
5034
+/* Emits the `vftint.wu.s vd, vj` instruction. */
5035
+static void __attribute__((unused))
5036
+tcg_out_opc_vftint_wu_s(TCGContext *s, TCGReg vd, TCGReg vj)
5037
+{
5038
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_WU_S, vd, vj));
5039
+}
5040
+
5041
+/* Emits the `vftint.lu.d vd, vj` instruction. */
5042
+static void __attribute__((unused))
5043
+tcg_out_opc_vftint_lu_d(TCGContext *s, TCGReg vd, TCGReg vj)
5044
+{
5045
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_LU_D, vd, vj));
5046
+}
5047
+
5048
+/* Emits the `vftintrz.wu.s vd, vj` instruction. */
5049
+static void __attribute__((unused))
5050
+tcg_out_opc_vftintrz_wu_s(TCGContext *s, TCGReg vd, TCGReg vj)
5051
+{
5052
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_WU_S, vd, vj));
5053
+}
5054
+
5055
+/* Emits the `vftintrz.lu.d vd, vj` instruction. */
5056
+static void __attribute__((unused))
5057
+tcg_out_opc_vftintrz_lu_d(TCGContext *s, TCGReg vd, TCGReg vj)
5058
+{
5059
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_LU_D, vd, vj));
5060
+}
5061
+
5062
+/* Emits the `vftintl.l.s vd, vj` instruction. */
5063
+static void __attribute__((unused))
5064
+tcg_out_opc_vftintl_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5065
+{
5066
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTL_L_S, vd, vj));
5067
+}
5068
+
5069
+/* Emits the `vftinth.l.s vd, vj` instruction. */
5070
+static void __attribute__((unused))
5071
+tcg_out_opc_vftinth_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5072
+{
5073
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTH_L_S, vd, vj));
5074
+}
5075
+
5076
+/* Emits the `vftintrml.l.s vd, vj` instruction. */
5077
+static void __attribute__((unused))
5078
+tcg_out_opc_vftintrml_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5079
+{
5080
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRML_L_S, vd, vj));
5081
+}
5082
+
5083
+/* Emits the `vftintrmh.l.s vd, vj` instruction. */
5084
+static void __attribute__((unused))
5085
+tcg_out_opc_vftintrmh_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5086
+{
5087
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRMH_L_S, vd, vj));
5088
+}
5089
+
5090
+/* Emits the `vftintrpl.l.s vd, vj` instruction. */
5091
+static void __attribute__((unused))
5092
+tcg_out_opc_vftintrpl_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5093
+{
5094
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRPL_L_S, vd, vj));
5095
+}
5096
+
5097
+/* Emits the `vftintrph.l.s vd, vj` instruction. */
5098
+static void __attribute__((unused))
5099
+tcg_out_opc_vftintrph_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5100
+{
5101
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRPH_L_S, vd, vj));
5102
+}
5103
+
5104
+/* Emits the `vftintrzl.l.s vd, vj` instruction. */
5105
+static void __attribute__((unused))
5106
+tcg_out_opc_vftintrzl_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5107
+{
5108
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZL_L_S, vd, vj));
5109
+}
5110
+
5111
+/* Emits the `vftintrzh.l.s vd, vj` instruction. */
5112
+static void __attribute__((unused))
5113
+tcg_out_opc_vftintrzh_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5114
+{
5115
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZH_L_S, vd, vj));
5116
+}
5117
+
5118
+/* Emits the `vftintrnel.l.s vd, vj` instruction. */
5119
+static void __attribute__((unused))
5120
+tcg_out_opc_vftintrnel_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5121
+{
5122
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNEL_L_S, vd, vj));
5123
+}
5124
+
5125
+/* Emits the `vftintrneh.l.s vd, vj` instruction. */
5126
+static void __attribute__((unused))
5127
+tcg_out_opc_vftintrneh_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
5128
+{
5129
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNEH_L_S, vd, vj));
5130
+}
5131
+
5132
+/* Emits the `vexth.h.b vd, vj` instruction. */
5133
+static void __attribute__((unused))
5134
+tcg_out_opc_vexth_h_b(TCGContext *s, TCGReg vd, TCGReg vj)
5135
+{
5136
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_H_B, vd, vj));
5137
+}
5138
+
5139
+/* Emits the `vexth.w.h vd, vj` instruction. */
5140
+static void __attribute__((unused))
5141
+tcg_out_opc_vexth_w_h(TCGContext *s, TCGReg vd, TCGReg vj)
5142
+{
5143
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_W_H, vd, vj));
5144
+}
5145
+
5146
+/* Emits the `vexth.d.w vd, vj` instruction. */
5147
+static void __attribute__((unused))
5148
+tcg_out_opc_vexth_d_w(TCGContext *s, TCGReg vd, TCGReg vj)
5149
+{
5150
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_D_W, vd, vj));
5151
+}
5152
+
5153
+/* Emits the `vexth.q.d vd, vj` instruction. */
5154
+static void __attribute__((unused))
5155
+tcg_out_opc_vexth_q_d(TCGContext *s, TCGReg vd, TCGReg vj)
5156
+{
5157
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_Q_D, vd, vj));
5158
+}
5159
+
5160
+/* Emits the `vexth.hu.bu vd, vj` instruction. */
5161
+static void __attribute__((unused))
5162
+tcg_out_opc_vexth_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj)
5163
+{
5164
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_HU_BU, vd, vj));
5165
+}
5166
+
5167
+/* Emits the `vexth.wu.hu vd, vj` instruction. */
5168
+static void __attribute__((unused))
5169
+tcg_out_opc_vexth_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj)
5170
+{
5171
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_WU_HU, vd, vj));
5172
+}
5173
+
5174
+/* Emits the `vexth.du.wu vd, vj` instruction. */
5175
+static void __attribute__((unused))
5176
+tcg_out_opc_vexth_du_wu(TCGContext *s, TCGReg vd, TCGReg vj)
5177
+{
5178
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_DU_WU, vd, vj));
5179
+}
5180
+
5181
+/* Emits the `vexth.qu.du vd, vj` instruction. */
5182
+static void __attribute__((unused))
5183
+tcg_out_opc_vexth_qu_du(TCGContext *s, TCGReg vd, TCGReg vj)
5184
+{
5185
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_QU_DU, vd, vj));
5186
+}
5187
+
5188
+/* Emits the `vreplgr2vr.b vd, j` instruction. */
5189
+static void __attribute__((unused))
5190
+tcg_out_opc_vreplgr2vr_b(TCGContext *s, TCGReg vd, TCGReg j)
5191
+{
5192
+ tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_B, vd, j));
5193
+}
5194
+
5195
+/* Emits the `vreplgr2vr.h vd, j` instruction. */
5196
+static void __attribute__((unused))
5197
+tcg_out_opc_vreplgr2vr_h(TCGContext *s, TCGReg vd, TCGReg j)
5198
+{
5199
+ tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_H, vd, j));
5200
+}
5201
+
5202
+/* Emits the `vreplgr2vr.w vd, j` instruction. */
5203
+static void __attribute__((unused))
5204
+tcg_out_opc_vreplgr2vr_w(TCGContext *s, TCGReg vd, TCGReg j)
5205
+{
5206
+ tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_W, vd, j));
5207
+}
5208
+
5209
+/* Emits the `vreplgr2vr.d vd, j` instruction. */
5210
+static void __attribute__((unused))
5211
+tcg_out_opc_vreplgr2vr_d(TCGContext *s, TCGReg vd, TCGReg j)
5212
+{
5213
+ tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_D, vd, j));
5214
+}
5215
+
5216
+/* Emits the `vrotri.b vd, vj, uk3` instruction. */
5217
+static void __attribute__((unused))
5218
+tcg_out_opc_vrotri_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5219
+{
5220
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VROTRI_B, vd, vj, uk3));
5221
+}
5222
+
5223
+/* Emits the `vrotri.h vd, vj, uk4` instruction. */
5224
+static void __attribute__((unused))
5225
+tcg_out_opc_vrotri_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5226
+{
5227
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VROTRI_H, vd, vj, uk4));
5228
+}
5229
+
5230
+/* Emits the `vrotri.w vd, vj, uk5` instruction. */
5231
+static void __attribute__((unused))
5232
+tcg_out_opc_vrotri_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5233
+{
5234
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VROTRI_W, vd, vj, uk5));
5235
+}
5236
+
5237
+/* Emits the `vrotri.d vd, vj, uk6` instruction. */
5238
+static void __attribute__((unused))
5239
+tcg_out_opc_vrotri_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5240
+{
5241
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VROTRI_D, vd, vj, uk6));
5242
+}
5243
+
5244
+/* Emits the `vsrlri.b vd, vj, uk3` instruction. */
5245
+static void __attribute__((unused))
5246
+tcg_out_opc_vsrlri_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5247
+{
5248
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRLRI_B, vd, vj, uk3));
5249
+}
5250
+
5251
+/* Emits the `vsrlri.h vd, vj, uk4` instruction. */
5252
+static void __attribute__((unused))
5253
+tcg_out_opc_vsrlri_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5254
+{
5255
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLRI_H, vd, vj, uk4));
5256
+}
5257
+
5258
+/* Emits the `vsrlri.w vd, vj, uk5` instruction. */
5259
+static void __attribute__((unused))
5260
+tcg_out_opc_vsrlri_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5261
+{
5262
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLRI_W, vd, vj, uk5));
5263
+}
5264
+
5265
+/* Emits the `vsrlri.d vd, vj, uk6` instruction. */
5266
+static void __attribute__((unused))
5267
+tcg_out_opc_vsrlri_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5268
+{
5269
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLRI_D, vd, vj, uk6));
5270
+}
5271
+
5272
+/* Emits the `vsrari.b vd, vj, uk3` instruction. */
5273
+static void __attribute__((unused))
5274
+tcg_out_opc_vsrari_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5275
+{
5276
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRARI_B, vd, vj, uk3));
5277
+}
5278
+
5279
+/* Emits the `vsrari.h vd, vj, uk4` instruction. */
5280
+static void __attribute__((unused))
5281
+tcg_out_opc_vsrari_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5282
+{
5283
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRARI_H, vd, vj, uk4));
5284
+}
5285
+
5286
+/* Emits the `vsrari.w vd, vj, uk5` instruction. */
5287
+static void __attribute__((unused))
5288
+tcg_out_opc_vsrari_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5289
+{
5290
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRARI_W, vd, vj, uk5));
5291
+}
5292
+
5293
+/* Emits the `vsrari.d vd, vj, uk6` instruction. */
5294
+static void __attribute__((unused))
5295
+tcg_out_opc_vsrari_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5296
+{
5297
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRARI_D, vd, vj, uk6));
5298
+}
5299
+
5300
+/* Emits the `vinsgr2vr.b vd, j, uk4` instruction. */
5301
+static void __attribute__((unused))
5302
+tcg_out_opc_vinsgr2vr_b(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk4)
5303
+{
5304
+ tcg_out32(s, encode_vdjuk4_insn(OPC_VINSGR2VR_B, vd, j, uk4));
5305
+}
5306
+
5307
+/* Emits the `vinsgr2vr.h vd, j, uk3` instruction. */
5308
+static void __attribute__((unused))
5309
+tcg_out_opc_vinsgr2vr_h(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk3)
5310
+{
5311
+ tcg_out32(s, encode_vdjuk3_insn(OPC_VINSGR2VR_H, vd, j, uk3));
5312
+}
5313
+
5314
+/* Emits the `vinsgr2vr.w vd, j, uk2` instruction. */
5315
+static void __attribute__((unused))
5316
+tcg_out_opc_vinsgr2vr_w(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk2)
5317
+{
5318
+ tcg_out32(s, encode_vdjuk2_insn(OPC_VINSGR2VR_W, vd, j, uk2));
5319
+}
5320
+
5321
+/* Emits the `vinsgr2vr.d vd, j, uk1` instruction. */
5322
+static void __attribute__((unused))
5323
+tcg_out_opc_vinsgr2vr_d(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk1)
5324
+{
5325
+ tcg_out32(s, encode_vdjuk1_insn(OPC_VINSGR2VR_D, vd, j, uk1));
5326
+}
5327
+
5328
+/* Emits the `vpickve2gr.b d, vj, uk4` instruction. */
5329
+static void __attribute__((unused))
5330
+tcg_out_opc_vpickve2gr_b(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk4)
5331
+{
5332
+ tcg_out32(s, encode_dvjuk4_insn(OPC_VPICKVE2GR_B, d, vj, uk4));
5333
+}
5334
+
5335
+/* Emits the `vpickve2gr.h d, vj, uk3` instruction. */
5336
+static void __attribute__((unused))
5337
+tcg_out_opc_vpickve2gr_h(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk3)
5338
+{
5339
+ tcg_out32(s, encode_dvjuk3_insn(OPC_VPICKVE2GR_H, d, vj, uk3));
5340
+}
5341
+
5342
+/* Emits the `vpickve2gr.w d, vj, uk2` instruction. */
5343
+static void __attribute__((unused))
5344
+tcg_out_opc_vpickve2gr_w(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk2)
5345
+{
5346
+ tcg_out32(s, encode_dvjuk2_insn(OPC_VPICKVE2GR_W, d, vj, uk2));
5347
+}
5348
+
5349
+/* Emits the `vpickve2gr.d d, vj, uk1` instruction. */
5350
+static void __attribute__((unused))
5351
+tcg_out_opc_vpickve2gr_d(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk1)
5352
+{
5353
+ tcg_out32(s, encode_dvjuk1_insn(OPC_VPICKVE2GR_D, d, vj, uk1));
5354
+}
5355
+
5356
+/* Emits the `vpickve2gr.bu d, vj, uk4` instruction. */
5357
+static void __attribute__((unused))
5358
+tcg_out_opc_vpickve2gr_bu(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk4)
5359
+{
5360
+ tcg_out32(s, encode_dvjuk4_insn(OPC_VPICKVE2GR_BU, d, vj, uk4));
5361
+}
5362
+
5363
+/* Emits the `vpickve2gr.hu d, vj, uk3` instruction. */
5364
+static void __attribute__((unused))
5365
+tcg_out_opc_vpickve2gr_hu(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk3)
5366
+{
5367
+ tcg_out32(s, encode_dvjuk3_insn(OPC_VPICKVE2GR_HU, d, vj, uk3));
5368
+}
5369
+
5370
+/* Emits the `vpickve2gr.wu d, vj, uk2` instruction. */
5371
+static void __attribute__((unused))
5372
+tcg_out_opc_vpickve2gr_wu(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk2)
5373
+{
5374
+ tcg_out32(s, encode_dvjuk2_insn(OPC_VPICKVE2GR_WU, d, vj, uk2));
5375
+}
5376
+
5377
+/* Emits the `vpickve2gr.du d, vj, uk1` instruction. */
5378
+static void __attribute__((unused))
5379
+tcg_out_opc_vpickve2gr_du(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk1)
5380
+{
5381
+ tcg_out32(s, encode_dvjuk1_insn(OPC_VPICKVE2GR_DU, d, vj, uk1));
5382
+}
5383
+
5384
+/* Emits the `vreplvei.b vd, vj, uk4` instruction. */
5385
+static void __attribute__((unused))
5386
+tcg_out_opc_vreplvei_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5387
+{
5388
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VREPLVEI_B, vd, vj, uk4));
5389
+}
5390
+
5391
+/* Emits the `vreplvei.h vd, vj, uk3` instruction. */
5392
+static void __attribute__((unused))
5393
+tcg_out_opc_vreplvei_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5394
+{
5395
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VREPLVEI_H, vd, vj, uk3));
5396
+}
5397
+
5398
+/* Emits the `vreplvei.w vd, vj, uk2` instruction. */
5399
+static void __attribute__((unused))
5400
+tcg_out_opc_vreplvei_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk2)
5401
+{
5402
+ tcg_out32(s, encode_vdvjuk2_insn(OPC_VREPLVEI_W, vd, vj, uk2));
5403
+}
5404
+
5405
+/* Emits the `vreplvei.d vd, vj, uk1` instruction. */
5406
+static void __attribute__((unused))
5407
+tcg_out_opc_vreplvei_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk1)
5408
+{
5409
+ tcg_out32(s, encode_vdvjuk1_insn(OPC_VREPLVEI_D, vd, vj, uk1));
5410
+}
5411
+
5412
+/* Emits the `vsllwil.h.b vd, vj, uk3` instruction. */
5413
+static void __attribute__((unused))
5414
+tcg_out_opc_vsllwil_h_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5415
+{
5416
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSLLWIL_H_B, vd, vj, uk3));
5417
+}
5418
+
5419
+/* Emits the `vsllwil.w.h vd, vj, uk4` instruction. */
5420
+static void __attribute__((unused))
5421
+tcg_out_opc_vsllwil_w_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5422
+{
5423
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSLLWIL_W_H, vd, vj, uk4));
5424
+}
5425
+
5426
+/* Emits the `vsllwil.d.w vd, vj, uk5` instruction. */
5427
+static void __attribute__((unused))
5428
+tcg_out_opc_vsllwil_d_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5429
+{
5430
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLLWIL_D_W, vd, vj, uk5));
5431
+}
5432
+
5433
+/* Emits the `vextl.q.d vd, vj` instruction. */
5434
+static void __attribute__((unused))
5435
+tcg_out_opc_vextl_q_d(TCGContext *s, TCGReg vd, TCGReg vj)
5436
+{
5437
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTL_Q_D, vd, vj));
5438
+}
5439
+
5440
+/* Emits the `vsllwil.hu.bu vd, vj, uk3` instruction. */
5441
+static void __attribute__((unused))
5442
+tcg_out_opc_vsllwil_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5443
+{
5444
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSLLWIL_HU_BU, vd, vj, uk3));
5445
+}
5446
+
5447
+/* Emits the `vsllwil.wu.hu vd, vj, uk4` instruction. */
5448
+static void __attribute__((unused))
5449
+tcg_out_opc_vsllwil_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5450
+{
5451
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSLLWIL_WU_HU, vd, vj, uk4));
5452
+}
5453
+
5454
+/* Emits the `vsllwil.du.wu vd, vj, uk5` instruction. */
5455
+static void __attribute__((unused))
5456
+tcg_out_opc_vsllwil_du_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5457
+{
5458
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLLWIL_DU_WU, vd, vj, uk5));
5459
+}
5460
+
5461
+/* Emits the `vextl.qu.du vd, vj` instruction. */
5462
+static void __attribute__((unused))
5463
+tcg_out_opc_vextl_qu_du(TCGContext *s, TCGReg vd, TCGReg vj)
5464
+{
5465
+ tcg_out32(s, encode_vdvj_insn(OPC_VEXTL_QU_DU, vd, vj));
5466
+}
5467
+
5468
+/* Emits the `vbitclri.b vd, vj, uk3` instruction. */
5469
+static void __attribute__((unused))
5470
+tcg_out_opc_vbitclri_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5471
+{
5472
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VBITCLRI_B, vd, vj, uk3));
5473
+}
5474
+
5475
+/* Emits the `vbitclri.h vd, vj, uk4` instruction. */
5476
+static void __attribute__((unused))
5477
+tcg_out_opc_vbitclri_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5478
+{
5479
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VBITCLRI_H, vd, vj, uk4));
5480
+}
5481
+
5482
+/* Emits the `vbitclri.w vd, vj, uk5` instruction. */
5483
+static void __attribute__((unused))
5484
+tcg_out_opc_vbitclri_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5485
+{
5486
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VBITCLRI_W, vd, vj, uk5));
5487
+}
5488
+
5489
+/* Emits the `vbitclri.d vd, vj, uk6` instruction. */
5490
+static void __attribute__((unused))
5491
+tcg_out_opc_vbitclri_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5492
+{
5493
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VBITCLRI_D, vd, vj, uk6));
5494
+}
5495
+
5496
+/* Emits the `vbitseti.b vd, vj, uk3` instruction. */
5497
+static void __attribute__((unused))
5498
+tcg_out_opc_vbitseti_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5499
+{
5500
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VBITSETI_B, vd, vj, uk3));
5501
+}
5502
+
5503
+/* Emits the `vbitseti.h vd, vj, uk4` instruction. */
5504
+static void __attribute__((unused))
5505
+tcg_out_opc_vbitseti_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5506
+{
5507
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VBITSETI_H, vd, vj, uk4));
5508
+}
5509
+
5510
+/* Emits the `vbitseti.w vd, vj, uk5` instruction. */
5511
+static void __attribute__((unused))
5512
+tcg_out_opc_vbitseti_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5513
+{
5514
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VBITSETI_W, vd, vj, uk5));
5515
+}
5516
+
5517
+/* Emits the `vbitseti.d vd, vj, uk6` instruction. */
5518
+static void __attribute__((unused))
5519
+tcg_out_opc_vbitseti_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5520
+{
5521
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VBITSETI_D, vd, vj, uk6));
5522
+}
5523
+
5524
+/* Emits the `vbitrevi.b vd, vj, uk3` instruction. */
5525
+static void __attribute__((unused))
5526
+tcg_out_opc_vbitrevi_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5527
+{
5528
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VBITREVI_B, vd, vj, uk3));
5529
+}
5530
+
5531
+/* Emits the `vbitrevi.h vd, vj, uk4` instruction. */
5532
+static void __attribute__((unused))
5533
+tcg_out_opc_vbitrevi_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5534
+{
5535
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VBITREVI_H, vd, vj, uk4));
5536
+}
5537
+
5538
+/* Emits the `vbitrevi.w vd, vj, uk5` instruction. */
5539
+static void __attribute__((unused))
5540
+tcg_out_opc_vbitrevi_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5541
+{
5542
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VBITREVI_W, vd, vj, uk5));
5543
+}
5544
+
5545
+/* Emits the `vbitrevi.d vd, vj, uk6` instruction. */
5546
+static void __attribute__((unused))
5547
+tcg_out_opc_vbitrevi_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5548
+{
5549
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VBITREVI_D, vd, vj, uk6));
5550
+}
5551
+
5552
+/* Emits the `vsat.b vd, vj, uk3` instruction. */
5553
+static void __attribute__((unused))
5554
+tcg_out_opc_vsat_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5555
+{
5556
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSAT_B, vd, vj, uk3));
5557
+}
5558
+
5559
+/* Emits the `vsat.h vd, vj, uk4` instruction. */
5560
+static void __attribute__((unused))
5561
+tcg_out_opc_vsat_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5562
+{
5563
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSAT_H, vd, vj, uk4));
5564
+}
5565
+
5566
+/* Emits the `vsat.w vd, vj, uk5` instruction. */
5567
+static void __attribute__((unused))
5568
+tcg_out_opc_vsat_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5569
+{
5570
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSAT_W, vd, vj, uk5));
5571
+}
5572
+
5573
+/* Emits the `vsat.d vd, vj, uk6` instruction. */
5574
+static void __attribute__((unused))
5575
+tcg_out_opc_vsat_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5576
+{
5577
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSAT_D, vd, vj, uk6));
5578
+}
5579
+
5580
+/* Emits the `vsat.bu vd, vj, uk3` instruction. */
5581
+static void __attribute__((unused))
5582
+tcg_out_opc_vsat_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5583
+{
5584
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSAT_BU, vd, vj, uk3));
5585
+}
5586
+
5587
+/* Emits the `vsat.hu vd, vj, uk4` instruction. */
5588
+static void __attribute__((unused))
5589
+tcg_out_opc_vsat_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5590
+{
5591
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSAT_HU, vd, vj, uk4));
5592
+}
5593
+
5594
+/* Emits the `vsat.wu vd, vj, uk5` instruction. */
5595
+static void __attribute__((unused))
5596
+tcg_out_opc_vsat_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5597
+{
5598
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSAT_WU, vd, vj, uk5));
5599
+}
5600
+
5601
+/* Emits the `vsat.du vd, vj, uk6` instruction. */
5602
+static void __attribute__((unused))
5603
+tcg_out_opc_vsat_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5604
+{
5605
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSAT_DU, vd, vj, uk6));
5606
+}
5607
+
5608
+/* Emits the `vslli.b vd, vj, uk3` instruction. */
5609
+static void __attribute__((unused))
5610
+tcg_out_opc_vslli_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5611
+{
5612
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSLLI_B, vd, vj, uk3));
5613
+}
5614
+
5615
+/* Emits the `vslli.h vd, vj, uk4` instruction. */
5616
+static void __attribute__((unused))
5617
+tcg_out_opc_vslli_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5618
+{
5619
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSLLI_H, vd, vj, uk4));
5620
+}
5621
+
5622
+/* Emits the `vslli.w vd, vj, uk5` instruction. */
5623
+static void __attribute__((unused))
5624
+tcg_out_opc_vslli_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5625
+{
5626
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLLI_W, vd, vj, uk5));
5627
+}
5628
+
5629
+/* Emits the `vslli.d vd, vj, uk6` instruction. */
5630
+static void __attribute__((unused))
5631
+tcg_out_opc_vslli_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5632
+{
5633
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSLLI_D, vd, vj, uk6));
5634
+}
5635
+
5636
+/* Emits the `vsrli.b vd, vj, uk3` instruction. */
5637
+static void __attribute__((unused))
5638
+tcg_out_opc_vsrli_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5639
+{
5640
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRLI_B, vd, vj, uk3));
5641
+}
5642
+
5643
+/* Emits the `vsrli.h vd, vj, uk4` instruction. */
5644
+static void __attribute__((unused))
5645
+tcg_out_opc_vsrli_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5646
+{
5647
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLI_H, vd, vj, uk4));
5648
+}
5649
+
5650
+/* Emits the `vsrli.w vd, vj, uk5` instruction. */
5651
+static void __attribute__((unused))
5652
+tcg_out_opc_vsrli_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5653
+{
5654
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLI_W, vd, vj, uk5));
5655
+}
5656
+
5657
+/* Emits the `vsrli.d vd, vj, uk6` instruction. */
5658
+static void __attribute__((unused))
5659
+tcg_out_opc_vsrli_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5660
+{
5661
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLI_D, vd, vj, uk6));
5662
+}
5663
+
5664
+/* Emits the `vsrai.b vd, vj, uk3` instruction. */
5665
+static void __attribute__((unused))
5666
+tcg_out_opc_vsrai_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
5667
+{
5668
+ tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRAI_B, vd, vj, uk3));
5669
+}
5670
+
5671
+/* Emits the `vsrai.h vd, vj, uk4` instruction. */
5672
+static void __attribute__((unused))
5673
+tcg_out_opc_vsrai_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5674
+{
5675
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRAI_H, vd, vj, uk4));
5676
+}
5677
+
5678
+/* Emits the `vsrai.w vd, vj, uk5` instruction. */
5679
+static void __attribute__((unused))
5680
+tcg_out_opc_vsrai_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5681
+{
5682
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRAI_W, vd, vj, uk5));
5683
+}
5684
+
5685
+/* Emits the `vsrai.d vd, vj, uk6` instruction. */
5686
+static void __attribute__((unused))
5687
+tcg_out_opc_vsrai_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5688
+{
5689
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRAI_D, vd, vj, uk6));
5690
+}
5691
+
5692
+/* Emits the `vsrlni.b.h vd, vj, uk4` instruction. */
5693
+static void __attribute__((unused))
5694
+tcg_out_opc_vsrlni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5695
+{
5696
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLNI_B_H, vd, vj, uk4));
5697
+}
5698
+
5699
+/* Emits the `vsrlni.h.w vd, vj, uk5` instruction. */
5700
+static void __attribute__((unused))
5701
+tcg_out_opc_vsrlni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5702
+{
5703
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLNI_H_W, vd, vj, uk5));
5704
+}
5705
+
5706
+/* Emits the `vsrlni.w.d vd, vj, uk6` instruction. */
5707
+static void __attribute__((unused))
5708
+tcg_out_opc_vsrlni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5709
+{
5710
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLNI_W_D, vd, vj, uk6));
5711
+}
5712
+
5713
+/* Emits the `vsrlni.d.q vd, vj, uk7` instruction. */
5714
+static void __attribute__((unused))
5715
+tcg_out_opc_vsrlni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5716
+{
5717
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRLNI_D_Q, vd, vj, uk7));
5718
+}
5719
+
5720
+/* Emits the `vsrlrni.b.h vd, vj, uk4` instruction. */
5721
+static void __attribute__((unused))
5722
+tcg_out_opc_vsrlrni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5723
+{
5724
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLRNI_B_H, vd, vj, uk4));
5725
+}
5726
+
5727
+/* Emits the `vsrlrni.h.w vd, vj, uk5` instruction. */
5728
+static void __attribute__((unused))
5729
+tcg_out_opc_vsrlrni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5730
+{
5731
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLRNI_H_W, vd, vj, uk5));
5732
+}
5733
+
5734
+/* Emits the `vsrlrni.w.d vd, vj, uk6` instruction. */
5735
+static void __attribute__((unused))
5736
+tcg_out_opc_vsrlrni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5737
+{
5738
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLRNI_W_D, vd, vj, uk6));
5739
+}
5740
+
5741
+/* Emits the `vsrlrni.d.q vd, vj, uk7` instruction. */
5742
+static void __attribute__((unused))
5743
+tcg_out_opc_vsrlrni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5744
+{
5745
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRLRNI_D_Q, vd, vj, uk7));
5746
+}
5747
+
5748
+/* Emits the `vssrlni.b.h vd, vj, uk4` instruction. */
5749
+static void __attribute__((unused))
5750
+tcg_out_opc_vssrlni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5751
+{
5752
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLNI_B_H, vd, vj, uk4));
5753
+}
5754
+
5755
+/* Emits the `vssrlni.h.w vd, vj, uk5` instruction. */
5756
+static void __attribute__((unused))
5757
+tcg_out_opc_vssrlni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5758
+{
5759
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLNI_H_W, vd, vj, uk5));
5760
+}
5761
+
5762
+/* Emits the `vssrlni.w.d vd, vj, uk6` instruction. */
5763
+static void __attribute__((unused))
5764
+tcg_out_opc_vssrlni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5765
+{
5766
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLNI_W_D, vd, vj, uk6));
5767
+}
5768
+
5769
+/* Emits the `vssrlni.d.q vd, vj, uk7` instruction. */
5770
+static void __attribute__((unused))
5771
+tcg_out_opc_vssrlni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5772
+{
5773
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLNI_D_Q, vd, vj, uk7));
5774
+}
5775
+
5776
+/* Emits the `vssrlni.bu.h vd, vj, uk4` instruction. */
5777
+static void __attribute__((unused))
5778
+tcg_out_opc_vssrlni_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5779
+{
5780
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLNI_BU_H, vd, vj, uk4));
5781
+}
5782
+
5783
+/* Emits the `vssrlni.hu.w vd, vj, uk5` instruction. */
5784
+static void __attribute__((unused))
5785
+tcg_out_opc_vssrlni_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5786
+{
5787
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLNI_HU_W, vd, vj, uk5));
5788
+}
5789
+
5790
+/* Emits the `vssrlni.wu.d vd, vj, uk6` instruction. */
5791
+static void __attribute__((unused))
5792
+tcg_out_opc_vssrlni_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5793
+{
5794
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLNI_WU_D, vd, vj, uk6));
5795
+}
5796
+
5797
+/* Emits the `vssrlni.du.q vd, vj, uk7` instruction. */
5798
+static void __attribute__((unused))
5799
+tcg_out_opc_vssrlni_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5800
+{
5801
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLNI_DU_Q, vd, vj, uk7));
5802
+}
5803
+
5804
+/* Emits the `vssrlrni.b.h vd, vj, uk4` instruction. */
5805
+static void __attribute__((unused))
5806
+tcg_out_opc_vssrlrni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5807
+{
5808
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLRNI_B_H, vd, vj, uk4));
5809
+}
5810
+
5811
+/* Emits the `vssrlrni.h.w vd, vj, uk5` instruction. */
5812
+static void __attribute__((unused))
5813
+tcg_out_opc_vssrlrni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5814
+{
5815
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLRNI_H_W, vd, vj, uk5));
5816
+}
5817
+
5818
+/* Emits the `vssrlrni.w.d vd, vj, uk6` instruction. */
5819
+static void __attribute__((unused))
5820
+tcg_out_opc_vssrlrni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5821
+{
5822
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLRNI_W_D, vd, vj, uk6));
5823
+}
5824
+
5825
+/* Emits the `vssrlrni.d.q vd, vj, uk7` instruction. */
5826
+static void __attribute__((unused))
5827
+tcg_out_opc_vssrlrni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5828
+{
5829
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLRNI_D_Q, vd, vj, uk7));
5830
+}
5831
+
5832
+/* Emits the `vssrlrni.bu.h vd, vj, uk4` instruction. */
5833
+static void __attribute__((unused))
5834
+tcg_out_opc_vssrlrni_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5835
+{
5836
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLRNI_BU_H, vd, vj, uk4));
5837
+}
5838
+
5839
+/* Emits the `vssrlrni.hu.w vd, vj, uk5` instruction. */
5840
+static void __attribute__((unused))
5841
+tcg_out_opc_vssrlrni_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5842
+{
5843
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLRNI_HU_W, vd, vj, uk5));
5844
+}
5845
+
5846
+/* Emits the `vssrlrni.wu.d vd, vj, uk6` instruction. */
5847
+static void __attribute__((unused))
5848
+tcg_out_opc_vssrlrni_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5849
+{
5850
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLRNI_WU_D, vd, vj, uk6));
5851
+}
5852
+
5853
+/* Emits the `vssrlrni.du.q vd, vj, uk7` instruction. */
5854
+static void __attribute__((unused))
5855
+tcg_out_opc_vssrlrni_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5856
+{
5857
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLRNI_DU_Q, vd, vj, uk7));
5858
+}
5859
+
5860
+/* Emits the `vsrani.b.h vd, vj, uk4` instruction. */
5861
+static void __attribute__((unused))
5862
+tcg_out_opc_vsrani_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5863
+{
5864
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRANI_B_H, vd, vj, uk4));
5865
+}
5866
+
5867
+/* Emits the `vsrani.h.w vd, vj, uk5` instruction. */
5868
+static void __attribute__((unused))
5869
+tcg_out_opc_vsrani_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5870
+{
5871
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRANI_H_W, vd, vj, uk5));
5872
+}
5873
+
5874
+/* Emits the `vsrani.w.d vd, vj, uk6` instruction. */
5875
+static void __attribute__((unused))
5876
+tcg_out_opc_vsrani_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5877
+{
5878
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRANI_W_D, vd, vj, uk6));
5879
+}
5880
+
5881
+/* Emits the `vsrani.d.q vd, vj, uk7` instruction. */
5882
+static void __attribute__((unused))
5883
+tcg_out_opc_vsrani_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5884
+{
5885
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRANI_D_Q, vd, vj, uk7));
5886
+}
5887
+
5888
+/* Emits the `vsrarni.b.h vd, vj, uk4` instruction. */
5889
+static void __attribute__((unused))
5890
+tcg_out_opc_vsrarni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5891
+{
5892
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRARNI_B_H, vd, vj, uk4));
5893
+}
5894
+
5895
+/* Emits the `vsrarni.h.w vd, vj, uk5` instruction. */
5896
+static void __attribute__((unused))
5897
+tcg_out_opc_vsrarni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5898
+{
5899
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRARNI_H_W, vd, vj, uk5));
5900
+}
5901
+
5902
+/* Emits the `vsrarni.w.d vd, vj, uk6` instruction. */
5903
+static void __attribute__((unused))
5904
+tcg_out_opc_vsrarni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5905
+{
5906
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRARNI_W_D, vd, vj, uk6));
5907
+}
5908
+
5909
+/* Emits the `vsrarni.d.q vd, vj, uk7` instruction. */
5910
+static void __attribute__((unused))
5911
+tcg_out_opc_vsrarni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5912
+{
5913
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRARNI_D_Q, vd, vj, uk7));
5914
+}
5915
+
5916
+/* Emits the `vssrani.b.h vd, vj, uk4` instruction. */
5917
+static void __attribute__((unused))
5918
+tcg_out_opc_vssrani_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5919
+{
5920
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRANI_B_H, vd, vj, uk4));
5921
+}
5922
+
5923
+/* Emits the `vssrani.h.w vd, vj, uk5` instruction. */
5924
+static void __attribute__((unused))
5925
+tcg_out_opc_vssrani_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5926
+{
5927
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRANI_H_W, vd, vj, uk5));
5928
+}
5929
+
5930
+/* Emits the `vssrani.w.d vd, vj, uk6` instruction. */
5931
+static void __attribute__((unused))
5932
+tcg_out_opc_vssrani_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5933
+{
5934
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRANI_W_D, vd, vj, uk6));
5935
+}
5936
+
5937
+/* Emits the `vssrani.d.q vd, vj, uk7` instruction. */
5938
+static void __attribute__((unused))
5939
+tcg_out_opc_vssrani_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5940
+{
5941
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRANI_D_Q, vd, vj, uk7));
5942
+}
5943
+
5944
+/* Emits the `vssrani.bu.h vd, vj, uk4` instruction. */
5945
+static void __attribute__((unused))
5946
+tcg_out_opc_vssrani_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5947
+{
5948
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRANI_BU_H, vd, vj, uk4));
5949
+}
5950
+
5951
+/* Emits the `vssrani.hu.w vd, vj, uk5` instruction. */
5952
+static void __attribute__((unused))
5953
+tcg_out_opc_vssrani_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5954
+{
5955
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRANI_HU_W, vd, vj, uk5));
5956
+}
5957
+
5958
+/* Emits the `vssrani.wu.d vd, vj, uk6` instruction. */
5959
+static void __attribute__((unused))
5960
+tcg_out_opc_vssrani_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5961
+{
5962
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRANI_WU_D, vd, vj, uk6));
5963
+}
5964
+
5965
+/* Emits the `vssrani.du.q vd, vj, uk7` instruction. */
5966
+static void __attribute__((unused))
5967
+tcg_out_opc_vssrani_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5968
+{
5969
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRANI_DU_Q, vd, vj, uk7));
5970
+}
5971
+
5972
+/* Emits the `vssrarni.b.h vd, vj, uk4` instruction. */
5973
+static void __attribute__((unused))
5974
+tcg_out_opc_vssrarni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
5975
+{
5976
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRARNI_B_H, vd, vj, uk4));
5977
+}
5978
+
5979
+/* Emits the `vssrarni.h.w vd, vj, uk5` instruction. */
5980
+static void __attribute__((unused))
5981
+tcg_out_opc_vssrarni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
5982
+{
5983
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRARNI_H_W, vd, vj, uk5));
5984
+}
5985
+
5986
+/* Emits the `vssrarni.w.d vd, vj, uk6` instruction. */
5987
+static void __attribute__((unused))
5988
+tcg_out_opc_vssrarni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
5989
+{
5990
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRARNI_W_D, vd, vj, uk6));
5991
+}
5992
+
5993
+/* Emits the `vssrarni.d.q vd, vj, uk7` instruction. */
5994
+static void __attribute__((unused))
5995
+tcg_out_opc_vssrarni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
5996
+{
5997
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRARNI_D_Q, vd, vj, uk7));
5998
+}
5999
+
6000
+/* Emits the `vssrarni.bu.h vd, vj, uk4` instruction. */
6001
+static void __attribute__((unused))
6002
+tcg_out_opc_vssrarni_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
6003
+{
6004
+ tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRARNI_BU_H, vd, vj, uk4));
6005
+}
6006
+
6007
+/* Emits the `vssrarni.hu.w vd, vj, uk5` instruction. */
6008
+static void __attribute__((unused))
6009
+tcg_out_opc_vssrarni_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
6010
+{
6011
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRARNI_HU_W, vd, vj, uk5));
6012
+}
6013
+
6014
+/* Emits the `vssrarni.wu.d vd, vj, uk6` instruction. */
6015
+static void __attribute__((unused))
6016
+tcg_out_opc_vssrarni_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
6017
+{
6018
+ tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRARNI_WU_D, vd, vj, uk6));
6019
+}
6020
+
6021
+/* Emits the `vssrarni.du.q vd, vj, uk7` instruction. */
6022
+static void __attribute__((unused))
6023
+tcg_out_opc_vssrarni_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
6024
+{
6025
+ tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRARNI_DU_Q, vd, vj, uk7));
6026
+}
6027
+
6028
+/* Emits the `vextrins.d vd, vj, uk8` instruction. */
6029
+static void __attribute__((unused))
6030
+tcg_out_opc_vextrins_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6031
+{
6032
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_D, vd, vj, uk8));
6033
+}
6034
+
6035
+/* Emits the `vextrins.w vd, vj, uk8` instruction. */
6036
+static void __attribute__((unused))
6037
+tcg_out_opc_vextrins_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6038
+{
6039
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_W, vd, vj, uk8));
6040
+}
6041
+
6042
+/* Emits the `vextrins.h vd, vj, uk8` instruction. */
6043
+static void __attribute__((unused))
6044
+tcg_out_opc_vextrins_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6045
+{
6046
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_H, vd, vj, uk8));
6047
+}
6048
+
6049
+/* Emits the `vextrins.b vd, vj, uk8` instruction. */
6050
+static void __attribute__((unused))
6051
+tcg_out_opc_vextrins_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6052
+{
6053
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_B, vd, vj, uk8));
6054
+}
6055
+
6056
+/* Emits the `vshuf4i.b vd, vj, uk8` instruction. */
6057
+static void __attribute__((unused))
6058
+tcg_out_opc_vshuf4i_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6059
+{
6060
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_B, vd, vj, uk8));
6061
+}
6062
+
6063
+/* Emits the `vshuf4i.h vd, vj, uk8` instruction. */
6064
+static void __attribute__((unused))
6065
+tcg_out_opc_vshuf4i_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6066
+{
6067
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_H, vd, vj, uk8));
6068
+}
6069
+
6070
+/* Emits the `vshuf4i.w vd, vj, uk8` instruction. */
6071
+static void __attribute__((unused))
6072
+tcg_out_opc_vshuf4i_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6073
+{
6074
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_W, vd, vj, uk8));
6075
+}
6076
+
6077
+/* Emits the `vshuf4i.d vd, vj, uk8` instruction. */
6078
+static void __attribute__((unused))
6079
+tcg_out_opc_vshuf4i_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6080
+{
6081
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_D, vd, vj, uk8));
6082
+}
6083
+
6084
+/* Emits the `vbitseli.b vd, vj, uk8` instruction. */
6085
+static void __attribute__((unused))
6086
+tcg_out_opc_vbitseli_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6087
+{
6088
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VBITSELI_B, vd, vj, uk8));
6089
+}
6090
+
6091
+/* Emits the `vandi.b vd, vj, uk8` instruction. */
6092
+static void __attribute__((unused))
6093
+tcg_out_opc_vandi_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6094
+{
6095
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VANDI_B, vd, vj, uk8));
6096
+}
6097
+
6098
+/* Emits the `vori.b vd, vj, uk8` instruction. */
6099
+static void __attribute__((unused))
6100
+tcg_out_opc_vori_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6101
+{
6102
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VORI_B, vd, vj, uk8));
6103
+}
6104
+
6105
+/* Emits the `vxori.b vd, vj, uk8` instruction. */
6106
+static void __attribute__((unused))
6107
+tcg_out_opc_vxori_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6108
+{
6109
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VXORI_B, vd, vj, uk8));
6110
+}
6111
+
6112
+/* Emits the `vnori.b vd, vj, uk8` instruction. */
6113
+static void __attribute__((unused))
6114
+tcg_out_opc_vnori_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6115
+{
6116
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VNORI_B, vd, vj, uk8));
6117
+}
6118
+
6119
+/* Emits the `vldi vd, sj13` instruction. */
6120
+static void __attribute__((unused))
6121
+tcg_out_opc_vldi(TCGContext *s, TCGReg vd, int32_t sj13)
6122
+{
6123
+ tcg_out32(s, encode_vdsj13_insn(OPC_VLDI, vd, sj13));
6124
+}
6125
+
6126
+/* Emits the `vpermi.w vd, vj, uk8` instruction. */
6127
+static void __attribute__((unused))
6128
+tcg_out_opc_vpermi_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
6129
+{
6130
+ tcg_out32(s, encode_vdvjuk8_insn(OPC_VPERMI_W, vd, vj, uk8));
6131
+}
6132
+
6133
/* End of generated code. */
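
Each generated helper above packs its register (and immediate) operands with the matching encode_* function and emits exactly one 32-bit instruction word through tcg_out32(). As a hedged illustration of how a later patch in this series might use these emitters, a gpr-to-vector broadcast dispatched by element size could be sketched as follows (the function name here is hypothetical; the series does this kind of dispatch when lowering dup_vec in tcg-target.c.inc):

    /* Sketch only: broadcast a general register into every vector element. */
    static void dup_vec_sketch(TCGContext *s, unsigned vece,
                               TCGReg dst, TCGReg src)
    {
        switch (vece) {
        case MO_8:
            tcg_out_opc_vreplgr2vr_b(s, dst, src);   /* vreplgr2vr.b */
            break;
        case MO_16:
            tcg_out_opc_vreplgr2vr_h(s, dst, src);   /* vreplgr2vr.h */
            break;
        case MO_32:
            tcg_out_opc_vreplgr2vr_w(s, dst, src);   /* vreplgr2vr.w */
            break;
        case MO_64:
            tcg_out_opc_vreplgr2vr_d(s, dst, src);   /* vreplgr2vr.d */
            break;
        default:
            g_assert_not_reached();
        }
    }
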
--
2.34.1
We're about to start validating PAGE_EXEC, which means
that we've got to mark the commpage executable. We had
been placing the commpage outside of reserved_va, which
was incorrect and led to an abort.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/arm/target_cpu.h | 4 ++--
linux-user/elfload.c | 6 +++++-
2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/linux-user/arm/target_cpu.h b/linux-user/arm/target_cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/arm/target_cpu.h
+++ b/linux-user/arm/target_cpu.h
@@ -XXX,XX +XXX,XX @@ static inline unsigned long arm_max_reserved_va(CPUState *cs)
} else {
/*
* We need to be able to map the commpage.
- * See validate_guest_space in linux-user/elfload.c.
+ * See init_guest_commpage in linux-user/elfload.c.
*/
- return 0xffff0000ul;
+ return 0xfffffffful;
38
C_O1_I2(r, r, ri)
39
C_O1_I2(r, r, rI)
40
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
41
index XXXXXXX..XXXXXXX 100644
42
--- a/tcg/loongarch64/tcg-target-con-str.h
43
+++ b/tcg/loongarch64/tcg-target-con-str.h
44
@@ -XXX,XX +XXX,XX @@
45
* REGS(letter, register_mask)
46
*/
47
REGS('r', ALL_GENERAL_REGS)
48
+REGS('w', ALL_VECTOR_REGS)
49
50
/*
51
* Define constraint letters for constants:
52
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
53
index XXXXXXX..XXXXXXX 100644
54
--- a/tcg/loongarch64/tcg-target.h
55
+++ b/tcg/loongarch64/tcg-target.h
56
@@ -XXX,XX +XXX,XX @@
57
#define LOONGARCH_TCG_TARGET_H
58
59
#define TCG_TARGET_INSN_UNIT_SIZE 4
60
-#define TCG_TARGET_NB_REGS 32
61
+#define TCG_TARGET_NB_REGS 64
62
63
#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
64
65
@@ -XXX,XX +XXX,XX @@ typedef enum {
66
TCG_REG_S7,
67
TCG_REG_S8,
68
69
+ TCG_REG_V0 = 32, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
70
+ TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
71
+ TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
72
+ TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
73
+ TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
74
+ TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
75
+ TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27,
76
+ TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
77
+
78
/* aliases */
79
TCG_AREG0 = TCG_REG_S0,
80
TCG_REG_TMP0 = TCG_REG_T8,
81
TCG_REG_TMP1 = TCG_REG_T7,
82
TCG_REG_TMP2 = TCG_REG_T6,
83
+ TCG_VEC_TMP0 = TCG_REG_V23,
84
} TCGReg;
85
86
+extern bool use_lsx_instructions;
87
+
88
/* used for function call generation */
89
#define TCG_REG_CALL_STACK TCG_REG_SP
90
#define TCG_TARGET_STACK_ALIGN 16
91
@@ -XXX,XX +XXX,XX @@ typedef enum {
92
93
#define TCG_TARGET_HAS_qemu_ldst_i128 0
94
95
+#define TCG_TARGET_HAS_v64 0
96
+#define TCG_TARGET_HAS_v128 use_lsx_instructions
97
+#define TCG_TARGET_HAS_v256 0
98
+
99
+#define TCG_TARGET_HAS_not_vec 0
100
+#define TCG_TARGET_HAS_neg_vec 0
101
+#define TCG_TARGET_HAS_abs_vec 0
102
+#define TCG_TARGET_HAS_andc_vec 0
103
+#define TCG_TARGET_HAS_orc_vec 0
104
+#define TCG_TARGET_HAS_nand_vec 0
105
+#define TCG_TARGET_HAS_nor_vec 0
106
+#define TCG_TARGET_HAS_eqv_vec 0
107
+#define TCG_TARGET_HAS_mul_vec 0
108
+#define TCG_TARGET_HAS_shi_vec 0
109
+#define TCG_TARGET_HAS_shs_vec 0
110
+#define TCG_TARGET_HAS_shv_vec 0
111
+#define TCG_TARGET_HAS_roti_vec 0
112
+#define TCG_TARGET_HAS_rots_vec 0
113
+#define TCG_TARGET_HAS_rotv_vec 0
114
+#define TCG_TARGET_HAS_sat_vec 0
115
+#define TCG_TARGET_HAS_minmax_vec 0
116
+#define TCG_TARGET_HAS_bitsel_vec 0
117
+#define TCG_TARGET_HAS_cmpsel_vec 0
118
+
119
#define TCG_TARGET_DEFAULT_MO (0)
120
121
#define TCG_TARGET_NEED_LDST_LABELS
122
diff --git a/tcg/loongarch64/tcg-target.opc.h b/tcg/loongarch64/tcg-target.opc.h
123
new file mode 100644
124
index XXXXXXX..XXXXXXX
125
--- /dev/null
126
+++ b/tcg/loongarch64/tcg-target.opc.h
127
@@ -XXX,XX +XXX,XX @@
128
+/*
129
+ * Copyright (c) 2023 Jiajie Chen
130
+ *
131
+ * This work is licensed under the terms of the GNU GPL, version 2 or
132
+ * (at your option) any later version.
133
+ *
134
+ * See the COPYING file in the top-level directory for details.
135
+ *
136
+ * Target-specific opcodes for host vector expansion. These will be
137
+ * emitted by tcg_expand_vec_op. For those familiar with GCC internals,
138
+ * consider these to be UNSPEC with names.
139
+ */
140
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
141
index XXXXXXX..XXXXXXX 100644
142
--- a/tcg/loongarch64/tcg-target.c.inc
143
+++ b/tcg/loongarch64/tcg-target.c.inc
144
@@ -XXX,XX +XXX,XX @@
145
#include "../tcg-ldst.c.inc"
146
#include <asm/hwcap.h>
147
148
+bool use_lsx_instructions;
149
+
150
#ifdef CONFIG_DEBUG_TCG
151
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
152
"zero",
153
@@ -XXX,XX +XXX,XX @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
154
"s5",
155
"s6",
156
"s7",
157
- "s8"
158
+ "s8",
159
+ "vr0",
160
+ "vr1",
161
+ "vr2",
162
+ "vr3",
163
+ "vr4",
164
+ "vr5",
165
+ "vr6",
166
+ "vr7",
167
+ "vr8",
168
+ "vr9",
169
+ "vr10",
170
+ "vr11",
171
+ "vr12",
172
+ "vr13",
173
+ "vr14",
174
+ "vr15",
175
+ "vr16",
176
+ "vr17",
177
+ "vr18",
178
+ "vr19",
179
+ "vr20",
180
+ "vr21",
181
+ "vr22",
182
+ "vr23",
183
+ "vr24",
184
+ "vr25",
185
+ "vr26",
186
+ "vr27",
187
+ "vr28",
188
+ "vr29",
189
+ "vr30",
190
+ "vr31",
191
};
192
#endif
193
194
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_reg_alloc_order[] = {
195
TCG_REG_A2,
196
TCG_REG_A1,
197
TCG_REG_A0,
198
+
199
+ /* Vector registers */
200
+ TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
201
+ TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
202
+ TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
203
+ TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
204
+ TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
205
+ TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
206
+ /* V24 - V31 are caller-saved, and skipped. */
207
};
208
209
static const int tcg_target_call_iarg_regs[] = {
210
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
211
#define TCG_CT_CONST_WSZ 0x2000
212
213
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
214
+#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
215
216
static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
217
{
218
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
27
}
219
}
28
}
220
}
29
#define MAX_RESERVED_VA arm_max_reserved_va
221
30
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
222
+static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
31
index XXXXXXX..XXXXXXX 100644
223
+ TCGReg rd, TCGReg rs)
32
--- a/linux-user/elfload.c
224
+{
33
+++ b/linux-user/elfload.c
225
+ switch (vece) {
34
@@ -XXX,XX +XXX,XX @@ enum {
226
+ case MO_8:
35
227
+ tcg_out_opc_vreplgr2vr_b(s, rd, rs);
36
static bool init_guest_commpage(void)
228
+ break;
229
+ case MO_16:
230
+ tcg_out_opc_vreplgr2vr_h(s, rd, rs);
231
+ break;
232
+ case MO_32:
233
+ tcg_out_opc_vreplgr2vr_w(s, rd, rs);
234
+ break;
235
+ case MO_64:
236
+ tcg_out_opc_vreplgr2vr_d(s, rd, rs);
237
+ break;
238
+ default:
239
+ g_assert_not_reached();
240
+ }
241
+ return true;
242
+}
243
+
244
+static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
245
+ TCGReg r, TCGReg base, intptr_t offset)
246
+{
247
+ /* Handle imm overflow and division (vldrepl.d imm is divided by 8) */
248
+ if (offset < -0x800 || offset > 0x7ff || \
249
+ (offset & ((1 << vece) - 1)) != 0) {
250
+ tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset);
251
+ base = TCG_REG_TMP0;
252
+ offset = 0;
253
+ }
254
+ offset >>= vece;
255
+
256
+ switch (vece) {
257
+ case MO_8:
258
+ tcg_out_opc_vldrepl_b(s, r, base, offset);
259
+ break;
260
+ case MO_16:
261
+ tcg_out_opc_vldrepl_h(s, r, base, offset);
262
+ break;
263
+ case MO_32:
264
+ tcg_out_opc_vldrepl_w(s, r, base, offset);
265
+ break;
266
+ case MO_64:
267
+ tcg_out_opc_vldrepl_d(s, r, base, offset);
268
+ break;
269
+ default:
270
+ g_assert_not_reached();
271
+ }
272
+ return true;
273
+}
274
+
275
+static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
276
+ TCGReg rd, int64_t v64)
277
+{
278
+ /* Try vldi if imm can fit */
279
+ int64_t value = sextract64(v64, 0, 8 << vece);
280
+ if (-0x200 <= value && value <= 0x1FF) {
281
+ uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF);
282
+ tcg_out_opc_vldi(s, rd, imm);
283
+ return;
284
+ }
285
+
286
+ /* TODO: vldi patterns when imm 12 is set */
287
+
288
+ /* Fallback to vreplgr2vr */
289
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value);
290
+ switch (vece) {
291
+ case MO_8:
292
+ tcg_out_opc_vreplgr2vr_b(s, rd, TCG_REG_TMP0);
293
+ break;
294
+ case MO_16:
295
+ tcg_out_opc_vreplgr2vr_h(s, rd, TCG_REG_TMP0);
296
+ break;
297
+ case MO_32:
298
+ tcg_out_opc_vreplgr2vr_w(s, rd, TCG_REG_TMP0);
299
+ break;
300
+ case MO_64:
301
+ tcg_out_opc_vreplgr2vr_d(s, rd, TCG_REG_TMP0);
302
+ break;
303
+ default:
304
+ g_assert_not_reached();
305
+ }
306
+}
307
+
308
+static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
309
+ unsigned vecl, unsigned vece,
310
+ const TCGArg args[TCG_MAX_OP_ARGS],
311
+ const int const_args[TCG_MAX_OP_ARGS])
312
+{
313
+ TCGType type = vecl + TCG_TYPE_V64;
314
+ TCGArg a0, a1, a2;
315
+ TCGReg temp = TCG_REG_TMP0;
316
+
317
+ a0 = args[0];
318
+ a1 = args[1];
319
+ a2 = args[2];
320
+
321
+ /* Currently only supports V128 */
322
+ tcg_debug_assert(type == TCG_TYPE_V128);
323
+
324
+ switch (opc) {
325
+ case INDEX_op_st_vec:
326
+ /* Try to fit vst imm */
327
+ if (-0x800 <= a2 && a2 <= 0x7ff) {
328
+ tcg_out_opc_vst(s, a0, a1, a2);
329
+ } else {
330
+ tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
331
+ tcg_out_opc_vstx(s, a0, a1, temp);
332
+ }
333
+ break;
334
+ case INDEX_op_ld_vec:
335
+ /* Try to fit vld imm */
336
+ if (-0x800 <= a2 && a2 <= 0x7ff) {
337
+ tcg_out_opc_vld(s, a0, a1, a2);
338
+ } else {
339
+ tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
340
+ tcg_out_opc_vldx(s, a0, a1, temp);
341
+ }
342
+ break;
343
+ case INDEX_op_dupm_vec:
344
+ tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
345
+ break;
346
+ default:
347
+ g_assert_not_reached();
348
+ }
349
+}
350
+
351
+int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
352
+{
353
+ switch (opc) {
354
+ case INDEX_op_ld_vec:
355
+ case INDEX_op_st_vec:
356
+ case INDEX_op_dup_vec:
357
+ case INDEX_op_dupm_vec:
358
+ return 1;
359
+ default:
360
+ return 0;
361
+ }
362
+}
363
+
364
+void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
365
+ TCGArg a0, ...)
366
+{
367
+ g_assert_not_reached();
368
+}
369
+
370
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
37
{
371
{
38
- void *want = g2h_untagged(HI_COMMPAGE & -qemu_host_page_size);
372
switch (op) {
39
+ abi_ptr commpage = HI_COMMPAGE & -qemu_host_page_size;
373
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
40
+ void *want = g2h_untagged(commpage);
374
case INDEX_op_movcond_i64:
41
void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
375
return C_O1_I4(r, rZ, rJ, rZ, rZ);
42
MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
376
43
377
+ case INDEX_op_ld_vec:
44
@@ -XXX,XX +XXX,XX @@ static bool init_guest_commpage(void)
378
+ case INDEX_op_dupm_vec:
45
perror("Protecting guest commpage");
379
+ case INDEX_op_dup_vec:
380
+ return C_O1_I1(w, r);
381
+
382
+ case INDEX_op_st_vec:
383
+ return C_O0_I2(w, r);
384
+
385
default:
386
g_assert_not_reached();
387
}
388
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
46
exit(EXIT_FAILURE);
389
exit(EXIT_FAILURE);
47
}
390
}
48
+
391
49
+ page_set_flags(commpage, commpage + qemu_host_page_size,
392
+ if (hwcap & HWCAP_LOONGARCH_LSX) {
50
+ PAGE_READ | PAGE_EXEC | PAGE_VALID);
393
+ use_lsx_instructions = 1;
51
return true;
394
+ }
395
+
396
tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
397
tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
398
399
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
400
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
401
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
402
403
+ if (use_lsx_instructions) {
404
+ tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
405
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
406
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
407
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
408
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
409
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
410
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
411
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
412
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
413
+ }
414
+
415
s->reserved_regs = 0;
416
tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
417
tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
418
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
419
tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
420
tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
421
tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
422
+ tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
52
}
423
}
53
424
425
typedef struct {
54
--
426
--
55
2.34.1
427
2.34.1
diff view generated by jsdifflib
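
To illustrate the vldi fast path in tcg_out_dupi_vec() above: the 13-bit immediate carries the element size in the bits above a 10-bit value field, and the fast path applies only when the sign-extended element value fits in 10 bits. A minimal sketch of that decision (my paraphrase, not code from the series):

    #include <stdint.h>

    /* Sketch: does a dup immediate fit the vldi fast path? */
    static int vldi_imm_for_dup(unsigned vece, int64_t v64, uint32_t *imm)
    {
        /* Sign-extend the low 8 << vece bits, as sextract64() does. */
        int bits = 8 << vece;
        int64_t value = (int64_t)((uint64_t)v64 << (64 - bits)) >> (64 - bits);

        if (-0x200 <= value && value <= 0x1ff) {
            /* element-size selector above a 10-bit signed value */
            *imm = ((uint32_t)vece << 10) | ((uint32_t)v64 & 0x3ff);
            return 1;
        }
        return 0; /* caller falls back to movi + vreplgr2vr */
    }
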
New patch
From: Jiajie Chen <c@jia.je>

Pass vece to tcg_target_const_match() to allow correct interpretation of
const args of vector ops.

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Message-Id: <20230908022302.180442-4-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c | 4 ++--
 tcg/aarch64/tcg-target.c.inc | 2 +-
 tcg/arm/tcg-target.c.inc | 2 +-
 tcg/i386/tcg-target.c.inc | 2 +-
 tcg/loongarch64/tcg-target.c.inc | 2 +-
 tcg/mips/tcg-target.c.inc | 2 +-
 tcg/ppc/tcg-target.c.inc | 2 +-
 tcg/riscv/tcg-target.c.inc | 2 +-
 tcg/s390x/tcg-target.c.inc | 2 +-
 tcg/sparc64/tcg-target.c.inc | 2 +-
 tcg/tci/tcg-target.c.inc | 2 +-
 11 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
 static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                          const TCGHelperInfo *info);
 static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece);
 #ifdef TCG_TARGET_NEED_LDST_LABELS
 static int tcg_out_ldst_finalize(TCGContext *s);
 #endif
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
         ts = arg_temp(arg);
 
         if (ts->val_type == TEMP_VAL_CONST
-            && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) {
+            && tcg_target_const_match(ts->val, ts->type, arg_ct->ct, TCGOP_VECE(op))) {
             /* constant is OK for instruction */
             const_args[i] = 1;
             new_args[i] = ts->val;
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
     }
 }
 
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
 {
     if (ct & TCG_CT_CONST) {
         return 1;
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
  * mov operand2: values represented with x << (2 * y), x < 0x100
  * add, sub, eor...: ditto
  */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
 {
     if (ct & TCG_CT_CONST) {
         return 1;
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
 }
 
 /* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
 {
     if (ct & TCG_CT_CONST) {
         return 1;
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
 }
 
 /* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
 {
     if (ct & TCG_CT_CONST) {
         return true;
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool is_p2m1(tcg_target_long val)
 }
 
 /* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
 {
     if (ct & TCG_CT_CONST) {
         return 1;
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
 }
 
 /* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
 {
     if (ct & TCG_CT_CONST) {
         return 1;
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 #define sextreg sextract64
 
 /* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
 {
     if (ct & TCG_CT_CONST) {
         return 1;
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool risbg_mask(uint64_t c)
 }
 
 /* Test if a constant matches the constraint. */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
 {
     if (ct & TCG_CT_CONST) {
         return 1;
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type,
 }
 
 /* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
 {
     if (ct & TCG_CT_CONST) {
         return 1;
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
 }
 
 /* Test if a constant matches the constraint. */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
 {
     return ct & TCG_CT_CONST;
 }
-- 
2.34.1
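
Why the element size matters here: a vector constant reaches the backend as a 64-bit value with the per-element pattern replicated, so the same bits can be a small negative number at one element width and a large positive one at another. A standalone illustration (mine, not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of sextract64(val, 0, 8 << vece) for this example. */
    static int64_t elem_value(int64_t val, int vece)
    {
        int bits = 8 << vece;
        return (int64_t)((uint64_t)val << (64 - bits)) >> (64 - bits);
    }

    int main(void)
    {
        printf("%lld\n", (long long)elem_value(0xff, 0)); /* -1: in range  */
        printf("%lld\n", (long long)elem_value(0xff, 1)); /* 255: too big */
        return 0;
    }
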
From: Jiajie Chen <c@jia.je>

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-5-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target-con-set.h | 1 +
 tcg/loongarch64/tcg-target-con-str.h | 1 +
 tcg/loongarch64/tcg-target.c.inc | 65 ++++++++++++++++++++++++++++
 3 files changed, 67 insertions(+)

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, 0, rZ)
 C_O1_I2(r, rZ, ri)
 C_O1_I2(r, rZ, rJ)
 C_O1_I2(r, rZ, rZ)
+C_O1_I2(w, w, wM)
 C_O1_I4(r, rZ, rJ, rZ, rZ)
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-str.h
+++ b/tcg/loongarch64/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ CONST('U', TCG_CT_CONST_U12)
 CONST('Z', TCG_CT_CONST_ZERO)
 CONST('C', TCG_CT_CONST_C12)
 CONST('W', TCG_CT_CONST_WSZ)
+CONST('M', TCG_CT_CONST_VCMP)
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 #define TCG_CT_CONST_U12 0x800
 #define TCG_CT_CONST_C12 0x1000
 #define TCG_CT_CONST_WSZ 0x2000
+#define TCG_CT_CONST_VCMP 0x4000
 
 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
 #define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
     if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
         return true;
     }
+    int64_t vec_val = sextract64(val, 0, 8 << vece);
+    if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
+        return true;
+    }
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     TCGType type = vecl + TCG_TYPE_V64;
     TCGArg a0, a1, a2;
     TCGReg temp = TCG_REG_TMP0;
+    TCGReg temp_vec = TCG_VEC_TMP0;
+
+    static const LoongArchInsn cmp_vec_insn[16][4] = {
+        [TCG_COND_EQ] = {OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D},
+        [TCG_COND_LE] = {OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D},
+        [TCG_COND_LEU] = {OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU},
+        [TCG_COND_LT] = {OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D},
+        [TCG_COND_LTU] = {OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU},
+    };
+    static const LoongArchInsn cmp_vec_imm_insn[16][4] = {
+        [TCG_COND_EQ] = {OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D},
+        [TCG_COND_LE] = {OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D},
+        [TCG_COND_LEU] = {OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU},
+        [TCG_COND_LT] = {OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D},
+        [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU},
+    };
+    LoongArchInsn insn;
 
     a0 = args[0];
     a1 = args[1];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
             tcg_out_opc_vldx(s, a0, a1, temp);
         }
         break;
+    case INDEX_op_cmp_vec:
+        TCGCond cond = args[3];
+        if (const_args[2]) {
+            /*
+             * cmp_vec dest, src, value
+             * Try vseqi/vslei/vslti
+             */
+            int64_t value = sextract64(a2, 0, 8 << vece);
+            if ((cond == TCG_COND_EQ || cond == TCG_COND_LE || \
+                 cond == TCG_COND_LT) && (-0x10 <= value && value <= 0x0f)) {
+                tcg_out32(s, encode_vdvjsk5_insn(cmp_vec_imm_insn[cond][vece], \
+                             a0, a1, value));
+                break;
+            } else if ((cond == TCG_COND_LEU || cond == TCG_COND_LTU) &&
+                       (0x00 <= value && value <= 0x1f)) {
+                tcg_out32(s, encode_vdvjuk5_insn(cmp_vec_imm_insn[cond][vece], \
+                             a0, a1, value));
+                break;
+            }
+
+            /*
+             * Fallback to:
+             * dupi_vec temp, a2
+             * cmp_vec a0, a1, temp, cond
+             */
+            tcg_out_dupi_vec(s, type, vece, temp_vec, a2);
+            a2 = temp_vec;
+        }
+
+        insn = cmp_vec_insn[cond][vece];
+        if (insn == 0) {
+            TCGArg t;
+            t = a1, a1 = a2, a2 = t;
+            cond = tcg_swap_cond(cond);
+            insn = cmp_vec_insn[cond][vece];
+            tcg_debug_assert(insn != 0);
+        }
+        tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
+        break;
     case INDEX_op_dupm_vec:
         tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
         break;
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_st_vec:
     case INDEX_op_dup_vec:
     case INDEX_op_dupm_vec:
+    case INDEX_op_cmp_vec:
         return 1;
     default:
         return 0;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_st_vec:
         return C_O0_I2(w, r);
 
+    case INDEX_op_cmp_vec:
+        return C_O1_I2(w, w, wM);
+
     default:
         g_assert_not_reached();
     }
-- 
2.34.1
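
The tail of the cmp_vec case leans on condition mirroring: LSX provides eq/le/lt (signed and unsigned) directly, so conditions such as GT are emitted by swapping the two source vectors and using the mirrored condition from tcg_swap_cond(). Per lane, the result is an all-ones or all-zeros mask, as this scalar model shows (illustrative only, not from the patch):

    #include <stdint.h>

    /* One 32-bit lane of cmp_vec with TCG_COND_GT, done as LT with the
     * operands swapped: a > b  ==  b < a. */
    static uint32_t cmp_gt_lane32(int32_t a, int32_t b)
    {
        return (b < a) ? UINT32_MAX : 0;
    }
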
From: Jiajie Chen <c@jia.je>

Lower the following ops:

- add_vec
- sub_vec

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-6-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target-con-set.h | 1 +
 tcg/loongarch64/tcg-target-con-str.h | 1 +
 tcg/loongarch64/tcg-target.c.inc | 61 ++++++++++++++++++++++++++++
 3 files changed, 63 insertions(+)

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, rZ, ri)
 C_O1_I2(r, rZ, rJ)
 C_O1_I2(r, rZ, rZ)
 C_O1_I2(w, w, wM)
+C_O1_I2(w, w, wA)
 C_O1_I4(r, rZ, rJ, rZ, rZ)
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-str.h
+++ b/tcg/loongarch64/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@ CONST('Z', TCG_CT_CONST_ZERO)
 CONST('C', TCG_CT_CONST_C12)
 CONST('W', TCG_CT_CONST_WSZ)
 CONST('M', TCG_CT_CONST_VCMP)
+CONST('A', TCG_CT_CONST_VADD)
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 #define TCG_CT_CONST_C12 0x1000
 #define TCG_CT_CONST_WSZ 0x2000
 #define TCG_CT_CONST_VCMP 0x4000
+#define TCG_CT_CONST_VADD 0x8000
 
 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
 #define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
     if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
         return true;
     }
+    if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
+        return true;
+    }
     return false;
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
     }
 }
 
+static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0,
+                               const TCGArg a1, const TCGArg a2,
+                               bool a2_is_const, bool is_add)
+{
+    static const LoongArchInsn add_vec_insn[4] = {
+        OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D
+    };
+    static const LoongArchInsn add_vec_imm_insn[4] = {
+        OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU
+    };
+    static const LoongArchInsn sub_vec_insn[4] = {
+        OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D
+    };
+    static const LoongArchInsn sub_vec_imm_insn[4] = {
+        OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU
+    };
+
+    if (a2_is_const) {
+        int64_t value = sextract64(a2, 0, 8 << vece);
+        if (!is_add) {
+            value = -value;
+        }
+
+        /* Try vaddi/vsubi */
+        if (0 <= value && value <= 0x1f) {
+            tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0, \
+                         a1, value));
+            return;
+        } else if (-0x1f <= value && value < 0) {
+            tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0, \
+                         a1, -value));
+            return;
+        }
+
+        /* constraint TCG_CT_CONST_VADD ensures unreachable */
+        g_assert_not_reached();
+    }
+
+    if (is_add) {
+        tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2));
+    } else {
+        tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2));
+    }
+}
+
 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                            unsigned vecl, unsigned vece,
                            const TCGArg args[TCG_MAX_OP_ARGS],
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         }
         tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
         break;
+    case INDEX_op_add_vec:
+        tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], true);
+        break;
+    case INDEX_op_sub_vec:
+        tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], false);
+        break;
     case INDEX_op_dupm_vec:
         tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
         break;
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_dup_vec:
     case INDEX_op_dupm_vec:
     case INDEX_op_cmp_vec:
+    case INDEX_op_add_vec:
+    case INDEX_op_sub_vec:
         return 1;
     default:
         return 0;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_cmp_vec:
         return C_O1_I2(w, w, wM);
 
+    case INDEX_op_add_vec:
+    case INDEX_op_sub_vec:
+        return C_O1_I2(w, w, wA);
+
     default:
         g_assert_not_reached();
     }
-- 
2.34.1
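
The constant path above folds any value in the wA range into the unsigned-immediate vaddi/vsubi forms by flipping the sign together with the operation. A sketch of just that selection (names here are hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    typedef enum { USE_VADDI, USE_VSUBI } ImmForm;

    /* Pick the instruction form for "a1 (+/-) value" with |value| <= 0x1f. */
    static ImmForm pick_imm_form(bool is_add, int64_t value, uint32_t *uk5)
    {
        if (!is_add) {
            value = -value;          /* a1 - c  ==  a1 + (-c) */
        }
        if (value >= 0) {
            *uk5 = (uint32_t)value;  /* vaddi.* with unsigned imm */
            return USE_VADDI;
        }
        *uk5 = (uint32_t)-value;     /* vsubi.* with unsigned imm */
        return USE_VSUBI;
    }
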
New patch
From: Jiajie Chen <c@jia.je>

Lower the following ops:

- and_vec
- andc_vec
- or_vec
- orc_vec
- xor_vec
- nor_vec
- not_vec

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-7-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target-con-set.h | 2 ++
 tcg/loongarch64/tcg-target.h | 8 ++---
 tcg/loongarch64/tcg-target.c.inc | 44 ++++++++++++++++++++++++++++
 3 files changed, 50 insertions(+), 4 deletions(-)

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O0_I2(rZ, rZ)
 C_O0_I2(w, r)
 C_O1_I1(r, r)
 C_O1_I1(w, r)
+C_O1_I1(w, w)
 C_O1_I2(r, r, rC)
 C_O1_I2(r, r, ri)
 C_O1_I2(r, r, rI)
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, 0, rZ)
 C_O1_I2(r, rZ, ri)
 C_O1_I2(r, rZ, rJ)
 C_O1_I2(r, rZ, rZ)
+C_O1_I2(w, w, w)
 C_O1_I2(w, w, wM)
 C_O1_I2(w, w, wA)
 C_O1_I4(r, rZ, rJ, rZ, rZ)
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
 #define TCG_TARGET_HAS_v128 use_lsx_instructions
 #define TCG_TARGET_HAS_v256 0
 
-#define TCG_TARGET_HAS_not_vec 0
+#define TCG_TARGET_HAS_not_vec 1
 #define TCG_TARGET_HAS_neg_vec 0
 #define TCG_TARGET_HAS_abs_vec 0
-#define TCG_TARGET_HAS_andc_vec 0
-#define TCG_TARGET_HAS_orc_vec 0
+#define TCG_TARGET_HAS_andc_vec 1
+#define TCG_TARGET_HAS_orc_vec 1
 #define TCG_TARGET_HAS_nand_vec 0
-#define TCG_TARGET_HAS_nor_vec 0
+#define TCG_TARGET_HAS_nor_vec 1
 #define TCG_TARGET_HAS_eqv_vec 0
 #define TCG_TARGET_HAS_mul_vec 0
 #define TCG_TARGET_HAS_shi_vec 0
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
             tcg_out_opc_vldx(s, a0, a1, temp);
         }
         break;
+    case INDEX_op_and_vec:
+        tcg_out_opc_vand_v(s, a0, a1, a2);
+        break;
+    case INDEX_op_andc_vec:
+        /*
+         * vandn vd, vj, vk: vd = vk & ~vj
+         * andc_vec vd, vj, vk: vd = vj & ~vk
+         * vj and vk are swapped
+         */
+        tcg_out_opc_vandn_v(s, a0, a2, a1);
+        break;
+    case INDEX_op_or_vec:
+        tcg_out_opc_vor_v(s, a0, a1, a2);
+        break;
+    case INDEX_op_orc_vec:
+        tcg_out_opc_vorn_v(s, a0, a1, a2);
+        break;
+    case INDEX_op_xor_vec:
+        tcg_out_opc_vxor_v(s, a0, a1, a2);
+        break;
+    case INDEX_op_nor_vec:
+        tcg_out_opc_vnor_v(s, a0, a1, a2);
+        break;
+    case INDEX_op_not_vec:
+        tcg_out_opc_vnor_v(s, a0, a1, a1);
+        break;
     case INDEX_op_cmp_vec:
         TCGCond cond = args[3];
         if (const_args[2]) {
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_cmp_vec:
     case INDEX_op_add_vec:
     case INDEX_op_sub_vec:
+    case INDEX_op_and_vec:
+    case INDEX_op_andc_vec:
+    case INDEX_op_or_vec:
+    case INDEX_op_orc_vec:
+    case INDEX_op_xor_vec:
+    case INDEX_op_nor_vec:
+    case INDEX_op_not_vec:
         return 1;
     default:
         return 0;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_sub_vec:
         return C_O1_I2(w, w, wA);
 
+    case INDEX_op_and_vec:
+    case INDEX_op_andc_vec:
+    case INDEX_op_or_vec:
+    case INDEX_op_orc_vec:
+    case INDEX_op_xor_vec:
+    case INDEX_op_nor_vec:
+        return C_O1_I2(w, w, w);
+
+    case INDEX_op_not_vec:
+        return C_O1_I1(w, w);
+
     default:
         g_assert_not_reached();
     }
-- 
2.34.1
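
Two of these lowerings are worth spelling out: andc maps to vandn with the sources swapped, because vandn computes vk & ~vj while TCG's andc is vj & ~vk, and not is synthesized as nor of a register with itself. Per lane (illustrative C, not from the patch):

    #include <stdint.h>

    static uint64_t andc(uint64_t a, uint64_t b)    { return a & ~b; }
    static uint64_t vandn(uint64_t vj, uint64_t vk) { return vk & ~vj; }
    /* andc(a, b) == vandn(b, a) for all a, b. */

    static uint64_t not_via_nor(uint64_t a) { return ~(a | a); } /* == ~a */
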
New patch
From: Jiajie Chen <c@jia.je>

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-8-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target.h | 2 +-
 tcg/loongarch64/tcg-target.c.inc | 8 ++++++++
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
 #define TCG_TARGET_HAS_v256 0
 
 #define TCG_TARGET_HAS_not_vec 1
-#define TCG_TARGET_HAS_neg_vec 0
+#define TCG_TARGET_HAS_neg_vec 1
 #define TCG_TARGET_HAS_abs_vec 0
 #define TCG_TARGET_HAS_andc_vec 1
 #define TCG_TARGET_HAS_orc_vec 1
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU},
     };
     LoongArchInsn insn;
+    static const LoongArchInsn neg_vec_insn[4] = {
+        OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D
+    };
 
     a0 = args[0];
     a1 = args[1];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_sub_vec:
         tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], false);
         break;
+    case INDEX_op_neg_vec:
+        tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1));
+        break;
     case INDEX_op_dupm_vec:
         tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
         break;
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_xor_vec:
     case INDEX_op_nor_vec:
     case INDEX_op_not_vec:
+    case INDEX_op_neg_vec:
        return 1;
    default:
        return 0;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
        return C_O1_I2(w, w, w);
 
     case INDEX_op_not_vec:
+    case INDEX_op_neg_vec:
        return C_O1_I1(w, w);
 
     default:
-- 
2.34.1
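
With a native vneg, negation is a single instruction. Without it, TCG's generic vector layer has to synthesize the same per-lane result, essentially a subtraction from zero (my paraphrase of the fallback, not code from the series):

    #include <stdint.h>

    /* Per-lane meaning of neg_vec for a 32-bit element. */
    static int32_t neg_lane32(int32_t x)
    {
        return 0 - x; /* what a sub-from-zero expansion would compute */
    }
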
New patch
From: Jiajie Chen <c@jia.je>

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-9-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target.h | 2 +-
 tcg/loongarch64/tcg-target.c.inc | 8 ++++++++
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
 #define TCG_TARGET_HAS_nand_vec 0
 #define TCG_TARGET_HAS_nor_vec 1
 #define TCG_TARGET_HAS_eqv_vec 0
-#define TCG_TARGET_HAS_mul_vec 0
+#define TCG_TARGET_HAS_mul_vec 1
 #define TCG_TARGET_HAS_shi_vec 0
 #define TCG_TARGET_HAS_shs_vec 0
 #define TCG_TARGET_HAS_shv_vec 0
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     static const LoongArchInsn neg_vec_insn[4] = {
         OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D
     };
+    static const LoongArchInsn mul_vec_insn[4] = {
+        OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D
+    };
 
     a0 = args[0];
     a1 = args[1];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_neg_vec:
         tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1));
         break;
+    case INDEX_op_mul_vec:
+        tcg_out32(s, encode_vdvjvk_insn(mul_vec_insn[vece], a0, a1, a2));
+        break;
     case INDEX_op_dupm_vec:
         tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
         break;
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_nor_vec:
     case INDEX_op_not_vec:
     case INDEX_op_neg_vec:
+    case INDEX_op_mul_vec:
        return 1;
    default:
        return 0;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_orc_vec:
     case INDEX_op_xor_vec:
     case INDEX_op_nor_vec:
+    case INDEX_op_mul_vec:
         return C_O1_I2(w, w, w);
 
     case INDEX_op_not_vec:
-- 
2.34.1
New patch
From: Jiajie Chen <c@jia.je>

Lower the following ops:

- smin_vec
- smax_vec
- umin_vec
- umax_vec

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-10-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target.h | 2 +-
 tcg/loongarch64/tcg-target.c.inc | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 33 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
 #define TCG_TARGET_HAS_rots_vec 0
 #define TCG_TARGET_HAS_rotv_vec 0
 #define TCG_TARGET_HAS_sat_vec 0
-#define TCG_TARGET_HAS_minmax_vec 0
+#define TCG_TARGET_HAS_minmax_vec 1
 #define TCG_TARGET_HAS_bitsel_vec 0
 #define TCG_TARGET_HAS_cmpsel_vec 0
 
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     static const LoongArchInsn mul_vec_insn[4] = {
         OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D
     };
+    static const LoongArchInsn smin_vec_insn[4] = {
+        OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D
+    };
+    static const LoongArchInsn umin_vec_insn[4] = {
+        OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU
+    };
+    static const LoongArchInsn smax_vec_insn[4] = {
+        OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D
+    };
+    static const LoongArchInsn umax_vec_insn[4] = {
+        OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU
+    };
 
     a0 = args[0];
     a1 = args[1];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_mul_vec:
         tcg_out32(s, encode_vdvjvk_insn(mul_vec_insn[vece], a0, a1, a2));
         break;
+    case INDEX_op_smin_vec:
+        tcg_out32(s, encode_vdvjvk_insn(smin_vec_insn[vece], a0, a1, a2));
+        break;
+    case INDEX_op_smax_vec:
+        tcg_out32(s, encode_vdvjvk_insn(smax_vec_insn[vece], a0, a1, a2));
+        break;
+    case INDEX_op_umin_vec:
+        tcg_out32(s, encode_vdvjvk_insn(umin_vec_insn[vece], a0, a1, a2));
+        break;
+    case INDEX_op_umax_vec:
+        tcg_out32(s, encode_vdvjvk_insn(umax_vec_insn[vece], a0, a1, a2));
+        break;
     case INDEX_op_dupm_vec:
         tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
         break;
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_not_vec:
     case INDEX_op_neg_vec:
     case INDEX_op_mul_vec:
+    case INDEX_op_smin_vec:
+    case INDEX_op_smax_vec:
+    case INDEX_op_umin_vec:
+    case INDEX_op_umax_vec:
         return 1;
     default:
         return 0;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_xor_vec:
     case INDEX_op_nor_vec:
     case INDEX_op_mul_vec:
+    case INDEX_op_smin_vec:
+    case INDEX_op_smax_vec:
+    case INDEX_op_umin_vec:
+    case INDEX_op_umax_vec:
         return C_O1_I2(w, w, w);
 
     case INDEX_op_not_vec:
-- 
2.34.1
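
The signed/unsigned split exists because identical lane bits order differently under the two interpretations: 0xff is the largest 8-bit unsigned value but -1 when signed, so vmin.bu and vmin.b can pick opposite lanes for the same inputs. A quick scalar illustration (mine, not from the patch):

    #include <stdint.h>

    static uint8_t umin8(uint8_t a, uint8_t b) { return a < b ? a : b; }
    static int8_t  smin8(int8_t a, int8_t b)   { return a < b ? a : b; }

    /* umin8(0xff, 0x01) == 0x01, but smin8((int8_t)0xff, 0x01) == -1. */
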
From: Jiajie Chen <c@jia.je>

Lower the following ops:

- ssadd_vec
- usadd_vec
- sssub_vec
- ussub_vec

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-11-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/loongarch64/tcg-target.h | 2 +-
tcg/loongarch64/tcg-target.c.inc | 32 ++++++++++++++++++++++++++++++++
2 files changed, 33 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
#define TCG_TARGET_HAS_roti_vec 0
#define TCG_TARGET_HAS_rots_vec 0
#define TCG_TARGET_HAS_rotv_vec 0
-#define TCG_TARGET_HAS_sat_vec 0
+#define TCG_TARGET_HAS_sat_vec 1
#define TCG_TARGET_HAS_minmax_vec 1
#define TCG_TARGET_HAS_bitsel_vec 0
#define TCG_TARGET_HAS_cmpsel_vec 0
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
static const LoongArchInsn umax_vec_insn[4] = {
OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU
};
+ static const LoongArchInsn ssadd_vec_insn[4] = {
+ OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D
+ };
+ static const LoongArchInsn usadd_vec_insn[4] = {
+ OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU
+ };
+ static const LoongArchInsn sssub_vec_insn[4] = {
+ OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D
+ };
+ static const LoongArchInsn ussub_vec_insn[4] = {
+ OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU
+ };

a0 = args[0];
a1 = args[1];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_umax_vec:
tcg_out32(s, encode_vdvjvk_insn(umax_vec_insn[vece], a0, a1, a2));
break;
+ case INDEX_op_ssadd_vec:
+ tcg_out32(s, encode_vdvjvk_insn(ssadd_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_usadd_vec:
+ tcg_out32(s, encode_vdvjvk_insn(usadd_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_sssub_vec:
+ tcg_out32(s, encode_vdvjvk_insn(sssub_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_ussub_vec:
+ tcg_out32(s, encode_vdvjvk_insn(ussub_vec_insn[vece], a0, a1, a2));
+ break;
case INDEX_op_dupm_vec:
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
break;
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
case INDEX_op_smax_vec:
case INDEX_op_umin_vec:
case INDEX_op_umax_vec:
+ case INDEX_op_ssadd_vec:
+ case INDEX_op_usadd_vec:
+ case INDEX_op_sssub_vec:
+ case INDEX_op_ussub_vec:
return 1;
default:
return 0;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_smax_vec:
case INDEX_op_umin_vec:
case INDEX_op_umax_vec:
+ case INDEX_op_ssadd_vec:
+ case INDEX_op_usadd_vec:
+ case INDEX_op_sssub_vec:
+ case INDEX_op_ussub_vec:
return C_O1_I2(w, w, w);

case INDEX_op_not_vec:
--
2.34.1

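As background for the VSADD/VSSUB selection above: a saturating op clamps the mathematical result to the element's representable range instead of wrapping. A minimal C sketch of the signed 8-bit lane semantics (illustrative only; the helper name is made up, not QEMU API):

    #include <stdint.h>

    /* Signed saturating 8-bit add: clamp to [INT8_MIN, INT8_MAX]
     * rather than wrapping modulo 256. */
    static int8_t ssadd_i8(int8_t a, int8_t b)
    {
        int r = a + b;              /* widen so the true sum is exact */
        if (r > INT8_MAX) {
            r = INT8_MAX;
        } else if (r < INT8_MIN) {
            r = INT8_MIN;
        }
        return (int8_t)r;
    }
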
From: Jiajie Chen <c@jia.je>

Lower the following ops:

- shlv_vec
- shrv_vec
- sarv_vec

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-12-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/loongarch64/tcg-target.h | 2 +-
tcg/loongarch64/tcg-target.c.inc | 24 ++++++++++++++++++++++++
2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
#define TCG_TARGET_HAS_mul_vec 1
#define TCG_TARGET_HAS_shi_vec 0
#define TCG_TARGET_HAS_shs_vec 0
-#define TCG_TARGET_HAS_shv_vec 0
+#define TCG_TARGET_HAS_shv_vec 1
#define TCG_TARGET_HAS_roti_vec 0
#define TCG_TARGET_HAS_rots_vec 0
#define TCG_TARGET_HAS_rotv_vec 0
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
static const LoongArchInsn ussub_vec_insn[4] = {
OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU
};
+ static const LoongArchInsn shlv_vec_insn[4] = {
+ OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D
+ };
+ static const LoongArchInsn shrv_vec_insn[4] = {
+ OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D
+ };
+ static const LoongArchInsn sarv_vec_insn[4] = {
+ OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D
+ };

a0 = args[0];
a1 = args[1];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_ussub_vec:
tcg_out32(s, encode_vdvjvk_insn(ussub_vec_insn[vece], a0, a1, a2));
break;
+ case INDEX_op_shlv_vec:
+ tcg_out32(s, encode_vdvjvk_insn(shlv_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_shrv_vec:
+ tcg_out32(s, encode_vdvjvk_insn(shrv_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_sarv_vec:
+ tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2));
+ break;
case INDEX_op_dupm_vec:
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
break;
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
case INDEX_op_usadd_vec:
case INDEX_op_sssub_vec:
case INDEX_op_ussub_vec:
+ case INDEX_op_shlv_vec:
+ case INDEX_op_shrv_vec:
+ case INDEX_op_sarv_vec:
return 1;
default:
return 0;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_usadd_vec:
case INDEX_op_sssub_vec:
case INDEX_op_ussub_vec:
+ case INDEX_op_shlv_vec:
+ case INDEX_op_shrv_vec:
+ case INDEX_op_sarv_vec:
return C_O1_I2(w, w, w);

case INDEX_op_not_vec:
--
2.34.1

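The VSLL/VSRL/VSRA family shifts each lane by the count held in the corresponding lane of vk. A sketch of one 32-bit lane, written under the assumption that counts are reduced modulo the element width (hypothetical helper names, not part of the patch):

    #include <stdint.h>

    /* One 32-bit lane of shlv/shrv/sarv; the count is masked to the
     * element width before shifting. */
    static uint32_t shlv_lane(uint32_t a, uint32_t n) { return a << (n & 31); }
    static uint32_t shrv_lane(uint32_t a, uint32_t n) { return a >> (n & 31); }
    static int32_t sarv_lane(int32_t a, uint32_t n)
    {
        /* >> on a negative int is arithmetic on common implementations */
        return a >> (n & 31);
    }
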
From: Jiajie Chen <c@jia.je>

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-13-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/loongarch64/tcg-target-con-set.h | 1 +
tcg/loongarch64/tcg-target.h | 2 +-
tcg/loongarch64/tcg-target.c.inc | 11 ++++++++++-
3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, rZ, rZ)
C_O1_I2(w, w, w)
C_O1_I2(w, w, wM)
C_O1_I2(w, w, wA)
+C_O1_I3(w, w, w, w)
C_O1_I4(r, rZ, rJ, rZ, rZ)
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
#define TCG_TARGET_HAS_rotv_vec 0
#define TCG_TARGET_HAS_sat_vec 1
#define TCG_TARGET_HAS_minmax_vec 1
-#define TCG_TARGET_HAS_bitsel_vec 0
+#define TCG_TARGET_HAS_bitsel_vec 1
#define TCG_TARGET_HAS_cmpsel_vec 0

#define TCG_TARGET_DEFAULT_MO (0)
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
const int const_args[TCG_MAX_OP_ARGS])
{
TCGType type = vecl + TCG_TYPE_V64;
- TCGArg a0, a1, a2;
+ TCGArg a0, a1, a2, a3;
TCGReg temp = TCG_REG_TMP0;
TCGReg temp_vec = TCG_VEC_TMP0;

@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
a0 = args[0];
a1 = args[1];
a2 = args[2];
+ a3 = args[3];

/* Currently only supports V128 */
tcg_debug_assert(type == TCG_TYPE_V128);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_sarv_vec:
tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2));
break;
+ case INDEX_op_bitsel_vec:
+ /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
+ tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
+ break;
case INDEX_op_dupm_vec:
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
break;
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
case INDEX_op_shlv_vec:
case INDEX_op_shrv_vec:
case INDEX_op_sarv_vec:
+ case INDEX_op_bitsel_vec:
return 1;
default:
return 0;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_neg_vec:
return C_O1_I1(w, w);

+ case INDEX_op_bitsel_vec:
+ return C_O1_I3(w, w, w, w);
+
default:
g_assert_not_reached();
}
}
--
2.34.1

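The operand swap in the emitter follows directly from the two definitions: TCG's bitsel_vec computes d = (b & a) | (c & ~a) with a as the mask, while vbitsel.v selects with va as the mask, so passing (a3, a2, a1) lines the TCG mask up with va. A scalar C sketch of the selection itself:

    #include <stdint.h>

    /* bitsel: each bit comes from b where the mask a is 1,
     * and from c where a is 0. */
    static uint64_t bitsel(uint64_t a, uint64_t b, uint64_t c)
    {
        return (b & a) | (c & ~a);
    }
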
From: Jiajie Chen <c@jia.je>

Lower the following ops:

- shli_vec
- shri_vec
- sari_vec

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-14-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/loongarch64/tcg-target.h | 2 +-
tcg/loongarch64/tcg-target.c.inc | 21 +++++++++++++++++++++
2 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
#define TCG_TARGET_HAS_nor_vec 1
#define TCG_TARGET_HAS_eqv_vec 0
#define TCG_TARGET_HAS_mul_vec 1
-#define TCG_TARGET_HAS_shi_vec 0
+#define TCG_TARGET_HAS_shi_vec 1
#define TCG_TARGET_HAS_shs_vec 0
#define TCG_TARGET_HAS_shv_vec 1
#define TCG_TARGET_HAS_roti_vec 0
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
static const LoongArchInsn sarv_vec_insn[4] = {
OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D
};
+ static const LoongArchInsn shli_vec_insn[4] = {
+ OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D
+ };
+ static const LoongArchInsn shri_vec_insn[4] = {
+ OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D
+ };
+ static const LoongArchInsn sari_vec_insn[4] = {
+ OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D
+ };

a0 = args[0];
a1 = args[1];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_sarv_vec:
tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2));
break;
+ case INDEX_op_shli_vec:
+ tcg_out32(s, encode_vdvjuk3_insn(shli_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_shri_vec:
+ tcg_out32(s, encode_vdvjuk3_insn(shri_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_sari_vec:
+ tcg_out32(s, encode_vdvjuk3_insn(sari_vec_insn[vece], a0, a1, a2));
+ break;
case INDEX_op_bitsel_vec:
/* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)

case INDEX_op_not_vec:
case INDEX_op_neg_vec:
+ case INDEX_op_shli_vec:
+ case INDEX_op_shri_vec:
+ case INDEX_op_sari_vec:
return C_O1_I1(w, w);

case INDEX_op_bitsel_vec:
--
2.34.1

From: Jiajie Chen <c@jia.je>

Lower the following ops:

- rotrv_vec
- rotlv_vec

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-15-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/loongarch64/tcg-target.h | 2 +-
tcg/loongarch64/tcg-target.c.inc | 14 ++++++++++++++
2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
#define TCG_TARGET_HAS_shv_vec 1
#define TCG_TARGET_HAS_roti_vec 0
#define TCG_TARGET_HAS_rots_vec 0
-#define TCG_TARGET_HAS_rotv_vec 0
+#define TCG_TARGET_HAS_rotv_vec 1
#define TCG_TARGET_HAS_sat_vec 1
#define TCG_TARGET_HAS_minmax_vec 1
#define TCG_TARGET_HAS_bitsel_vec 1
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
static const LoongArchInsn sari_vec_insn[4] = {
OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D
};
+ static const LoongArchInsn rotrv_vec_insn[4] = {
+ OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D
+ };

a0 = args[0];
a1 = args[1];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_sari_vec:
tcg_out32(s, encode_vdvjuk3_insn(sari_vec_insn[vece], a0, a1, a2));
break;
+ case INDEX_op_rotrv_vec:
+ tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1, a2));
+ break;
+ case INDEX_op_rotlv_vec:
+ /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */
+ tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], temp_vec, a2));
+ tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1,
+ temp_vec));
+ break;
case INDEX_op_bitsel_vec:
/* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_shlv_vec:
case INDEX_op_shrv_vec:
case INDEX_op_sarv_vec:
+ case INDEX_op_rotrv_vec:
+ case INDEX_op_rotlv_vec:
return C_O1_I2(w, w, w);

case INDEX_op_not_vec:
--
2.34.1

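The rotlv_vec lowering relies on the identity rotl(x, n) = rotr(x, (-n) mod width), which is why one vector negate of the count plus vrotr suffices. A 32-bit C sketch of the identity (standalone illustration, not QEMU code):

    #include <stdint.h>

    static uint32_t rotr32(uint32_t x, unsigned n)
    {
        n &= 31;
        return (x >> n) | (x << ((32 - n) & 31));
    }

    /* rotl(x, n) == rotr(x, (-n) mod 32) */
    static uint32_t rotl32(uint32_t x, unsigned n)
    {
        return rotr32(x, (32 - (n & 31)) & 31);
    }
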
From: Jiajie Chen <c@jia.je>

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-16-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/loongarch64/tcg-target.h | 2 +-
tcg/loongarch64/tcg-target.c.inc | 21 +++++++++++++++++++++
2 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
#define TCG_TARGET_HAS_shi_vec 1
#define TCG_TARGET_HAS_shs_vec 0
#define TCG_TARGET_HAS_shv_vec 1
-#define TCG_TARGET_HAS_roti_vec 0
+#define TCG_TARGET_HAS_roti_vec 1
#define TCG_TARGET_HAS_rots_vec 0
#define TCG_TARGET_HAS_rotv_vec 1
#define TCG_TARGET_HAS_sat_vec 1
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1,
temp_vec));
break;
+ case INDEX_op_rotli_vec:
+ /* rotli_vec a1, a2 = rotri_vec a1, -a2 */
+ a2 = extract32(-a2, 0, 3 + vece);
+ switch (vece) {
+ case MO_8:
+ tcg_out_opc_vrotri_b(s, a0, a1, a2);
+ break;
+ case MO_16:
+ tcg_out_opc_vrotri_h(s, a0, a1, a2);
+ break;
+ case MO_32:
+ tcg_out_opc_vrotri_w(s, a0, a1, a2);
+ break;
+ case MO_64:
+ tcg_out_opc_vrotri_d(s, a0, a1, a2);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
case INDEX_op_bitsel_vec:
/* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_shli_vec:
case INDEX_op_shri_vec:
case INDEX_op_sari_vec:
+ case INDEX_op_rotli_vec:
return C_O1_I1(w, w);

case INDEX_op_bitsel_vec:
--
2.34.1

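The immediate case applies the same identity at code-generation time: extract32(-a2, 0, 3 + vece) is exactly (-count) mod width, since the element width is 8 << vece = 2^(3+vece). In isolation (hypothetical helper name, for illustration only):

    /* Convert a rotate-left immediate into the equivalent
     * rotate-right immediate for an element of 8 << vece bits. */
    static unsigned rotli_to_rotri(unsigned count, unsigned vece)
    {
        unsigned bits = 8u << vece;         /* 8, 16, 32 or 64 */
        return (bits - count) & (bits - 1); /* (-count) mod bits */
    }
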
From: Jiajie Chen <c@jia.je>

If LSX is available, use LSX instructions to implement 128-bit load &
store when MO_128 is required, otherwise use two 64-bit loads & stores.

Signed-off-by: Jiajie Chen <c@jia.je>
Message-Id: <20230908022302.180442-17-c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/loongarch64/tcg-target-con-set.h | 2 +
tcg/loongarch64/tcg-target.h | 2 +-
tcg/loongarch64/tcg-target.c.inc | 63 ++++++++++++++++++++++++++++
3 files changed, 66 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O0_I1(r)
C_O0_I2(rZ, r)
C_O0_I2(rZ, rZ)
C_O0_I2(w, r)
+C_O0_I3(r, r, r)
C_O1_I1(r, r)
C_O1_I1(w, r)
C_O1_I1(w, w)
@@ -XXX,XX +XXX,XX @@ C_O1_I2(w, w, wM)
C_O1_I2(w, w, wA)
C_O1_I3(w, w, w, w)
C_O1_I4(r, rZ, rJ, rZ, rZ)
+C_O2_I1(r, r, r)
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
#define TCG_TARGET_HAS_muluh_i64 1
#define TCG_TARGET_HAS_mulsh_i64 1

-#define TCG_TARGET_HAS_qemu_ldst_i128 0
+#define TCG_TARGET_HAS_qemu_ldst_i128 use_lsx_instructions

#define TCG_TARGET_HAS_v64 0
#define TCG_TARGET_HAS_v128 use_lsx_instructions
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
}
}

+static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi,
+ TCGReg addr_reg, MemOpIdx oi, bool is_ld)
+{
+ TCGLabelQemuLdst *ldst;
+ HostAddress h;
+
+ ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
+
+ if (h.aa.atom == MO_128) {
+ /*
+ * Use VLDX/VSTX when 128-bit atomicity is required.
+ * If address is aligned to 16-bytes, the 128-bit load/store is atomic.
+ */
+ if (is_ld) {
+ tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index);
+ tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0);
+ tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1);
+ } else {
+ tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0);
+ tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1);
+ tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index);
+ }
+ } else {
+ /* Otherwise use a pair of LD/ST. */
+ TCGReg base = h.base;
+ if (h.index != TCG_REG_ZERO) {
+ base = TCG_REG_TMP0;
+ tcg_out_opc_add_d(s, base, h.base, h.index);
+ }
+ if (is_ld) {
+ tcg_out_opc_ld_d(s, data_lo, base, 0);
+ tcg_out_opc_ld_d(s, data_hi, base, 8);
+ } else {
+ tcg_out_opc_st_d(s, data_lo, base, 0);
+ tcg_out_opc_st_d(s, data_hi, base, 8);
+ }
+ }
+
+ if (ldst) {
+ ldst->type = TCG_TYPE_I128;
+ ldst->datalo_reg = data_lo;
+ ldst->datahi_reg = data_hi;
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+ }
+}
+
/*
 * Entry-points
 */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
TCGArg a0 = args[0];
TCGArg a1 = args[1];
TCGArg a2 = args[2];
+ TCGArg a3 = args[3];
int c2 = const_args[2];

switch (opc) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_qemu_ld_a64_i64:
tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
break;
+ case INDEX_op_qemu_ld_a32_i128:
+ case INDEX_op_qemu_ld_a64_i128:
+ tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
+ break;
case INDEX_op_qemu_st_a32_i32:
case INDEX_op_qemu_st_a64_i32:
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_qemu_st_a64_i64:
tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
break;
+ case INDEX_op_qemu_st_a32_i128:
+ case INDEX_op_qemu_st_a64_i128:
+ tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
+ break;

case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_mov_i64:
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_qemu_st_a64_i64:
return C_O0_I2(rZ, r);

+ case INDEX_op_qemu_ld_a32_i128:
+ case INDEX_op_qemu_ld_a64_i128:
+ return C_O2_I1(r, r, r);
+
+ case INDEX_op_qemu_st_a32_i128:
+ case INDEX_op_qemu_st_a64_i128:
+ return C_O0_I3(r, r, r);
+
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
return C_O0_I2(rZ, rZ);
--
2.34.1

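For orientation, the shape of the decision in tcg_out_qemu_ldst_i128 in plain C: one 16-byte access when the guest requires 16-byte single-copy atomicity, otherwise two 8-byte halves. The memcpy calls merely stand in for the emitted vldx/vstx and ld.d/st.d pairs; this sketch does not itself guarantee atomicity.

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* need_atomic16 mirrors the h.aa.atom == MO_128 test above. */
    static void copy16(void *dst, const void *src, bool need_atomic16)
    {
        if (need_atomic16) {
            unsigned char v[16];            /* one 128-bit transfer */
            memcpy(v, src, 16);             /* ~ vldx */
            memcpy(dst, v, 16);             /* ~ vstx */
        } else {
            uint64_t lo, hi;                /* two 64-bit transfers */
            memcpy(&lo, src, 8);            /* ~ ld.d lo */
            memcpy(&hi, (const char *)src + 8, 8);
            memcpy(dst, &lo, 8);            /* ~ st.d lo */
            memcpy((char *)dst + 8, &hi, 8);
        }
    }
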
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Song Gao <gaosong@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
Message-Id: <20230831030904.1194667-2-richard.henderson@linaro.org>
---
accel/tcg/tcg-runtime.h | 25 ++++++
include/tcg/tcg-op-gvec-common.h | 6 ++
accel/tcg/tcg-runtime-gvec.c | 26 ++++++
tcg/tcg-op-gvec.c | 149 +++++++++++++++++++++++++++
4 files changed, 206 insertions(+)

diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-runtime.h
+++ b/accel/tcg/tcg-runtime.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(gvec_eqs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_eqs16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_eqs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_eqs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_lts8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_lts16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_lts32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_lts64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_les8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_les16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_les32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_les64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_ltus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_ltus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_ltus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_ltus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_leus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_leus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_leus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_leus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
diff --git a/include/tcg/tcg-op-gvec-common.h b/include/tcg/tcg-op-gvec-common.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-op-gvec-common.h
+++ b/include/tcg/tcg-op-gvec-common.h
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
uint32_t aofs, uint32_t bofs,
uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_cmpi(TCGCond cond, unsigned vece, uint32_t dofs,
+ uint32_t aofs, int64_t c,
+ uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
+ uint32_t aofs, TCGv_i64 c,
+ uint32_t oprsz, uint32_t maxsz);

/*
 * Perform vector bit select: d = (b & a) | (c & ~a).
diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-runtime-gvec.c
+++ b/accel/tcg/tcg-runtime-gvec.c
@@ -XXX,XX +XXX,XX @@ DO_CMP2(64)
#undef DO_CMP1
#undef DO_CMP2

+#define DO_CMP1(NAME, TYPE, OP) \
+void HELPER(NAME)(void *d, void *a, uint64_t b64, uint32_t desc) \
+{ \
+ intptr_t oprsz = simd_oprsz(desc); \
+ TYPE inv = simd_data(desc), b = b64; \
+ for (intptr_t i = 0; i < oprsz; i += sizeof(TYPE)) { \
+ *(TYPE *)(d + i) = -((*(TYPE *)(a + i) OP b) ^ inv); \
+ } \
+ clear_high(d, oprsz, desc); \
+}
+
+#define DO_CMP2(SZ) \
+ DO_CMP1(gvec_eqs##SZ, uint##SZ##_t, ==) \
+ DO_CMP1(gvec_lts##SZ, int##SZ##_t, <) \
+ DO_CMP1(gvec_les##SZ, int##SZ##_t, <=) \
+ DO_CMP1(gvec_ltus##SZ, uint##SZ##_t, <) \
+ DO_CMP1(gvec_leus##SZ, uint##SZ##_t, <=)
+
+DO_CMP2(8)
+DO_CMP2(16)
+DO_CMP2(32)
+DO_CMP2(64)
+
+#undef DO_CMP1
+#undef DO_CMP2
+
void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc)
{
intptr_t oprsz = simd_oprsz(desc);
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
}
}

+static void expand_cmps_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
+ uint32_t oprsz, uint32_t tysz, TCGType type,
+ TCGCond cond, TCGv_vec c)
+{
+ TCGv_vec t0 = tcg_temp_new_vec(type);
+ TCGv_vec t1 = tcg_temp_new_vec(type);
+ uint32_t i;
+
+ for (i = 0; i < oprsz; i += tysz) {
+ tcg_gen_ld_vec(t1, cpu_env, aofs + i);
+ tcg_gen_cmp_vec(cond, vece, t0, t1, c);
+ tcg_gen_st_vec(t0, cpu_env, dofs + i);
+ }
+}
+
+void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
+ uint32_t aofs, TCGv_i64 c,
+ uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode cmp_list[] = { INDEX_op_cmp_vec, 0 };
+ static gen_helper_gvec_2i * const eq_fn[4] = {
+ gen_helper_gvec_eqs8, gen_helper_gvec_eqs16,
+ gen_helper_gvec_eqs32, gen_helper_gvec_eqs64
+ };
+ static gen_helper_gvec_2i * const lt_fn[4] = {
+ gen_helper_gvec_lts8, gen_helper_gvec_lts16,
+ gen_helper_gvec_lts32, gen_helper_gvec_lts64
+ };
+ static gen_helper_gvec_2i * const le_fn[4] = {
+ gen_helper_gvec_les8, gen_helper_gvec_les16,
+ gen_helper_gvec_les32, gen_helper_gvec_les64
+ };
+ static gen_helper_gvec_2i * const ltu_fn[4] = {
+ gen_helper_gvec_ltus8, gen_helper_gvec_ltus16,
+ gen_helper_gvec_ltus32, gen_helper_gvec_ltus64
+ };
+ static gen_helper_gvec_2i * const leu_fn[4] = {
+ gen_helper_gvec_leus8, gen_helper_gvec_leus16,
+ gen_helper_gvec_leus32, gen_helper_gvec_leus64
+ };
+ static gen_helper_gvec_2i * const * const fns[16] = {
+ [TCG_COND_EQ] = eq_fn,
+ [TCG_COND_LT] = lt_fn,
+ [TCG_COND_LE] = le_fn,
+ [TCG_COND_LTU] = ltu_fn,
+ [TCG_COND_LEU] = leu_fn,
+ };
+
+ TCGType type;
+
+ check_size_align(oprsz, maxsz, dofs | aofs);
+ check_overlap_2(dofs, aofs, maxsz);
+
+ if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
+ do_dup(MO_8, dofs, oprsz, maxsz,
+ NULL, NULL, -(cond == TCG_COND_ALWAYS));
+ return;
+ }
+
+ /*
+ * Implement inline with a vector type, if possible.
+ * Prefer integer when 64-bit host and 64-bit comparison.
+ */
+ type = choose_vector_type(cmp_list, vece, oprsz,
+ TCG_TARGET_REG_BITS == 64 && vece == MO_64);
+ if (type != 0) {
+ const TCGOpcode *hold_list = tcg_swap_vecop_list(cmp_list);
+ TCGv_vec t_vec = tcg_temp_new_vec(type);
+ uint32_t some;
+
+ tcg_gen_dup_i64_vec(vece, t_vec, c);
+ switch (type) {
+ case TCG_TYPE_V256:
+ some = QEMU_ALIGN_DOWN(oprsz, 32);
+ expand_cmps_vec(vece, dofs, aofs, some, 32,
+ TCG_TYPE_V256, cond, t_vec);
+ aofs += some;
+ dofs += some;
+ oprsz -= some;
+ maxsz -= some;
+ /* fallthru */
+
+ case TCG_TYPE_V128:
+ some = QEMU_ALIGN_DOWN(oprsz, 16);
+ expand_cmps_vec(vece, dofs, aofs, some, 16,
+ TCG_TYPE_V128, cond, t_vec);
+ break;
+
+ case TCG_TYPE_V64:
+ some = QEMU_ALIGN_DOWN(oprsz, 8);
+ expand_cmps_vec(vece, dofs, aofs, some, 8,
+ TCG_TYPE_V64, cond, t_vec);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_vec(t_vec);
+ tcg_swap_vecop_list(hold_list);
+ } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
+ TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+ uint32_t i;
+
+ for (i = 0; i < oprsz; i += 8) {
+ tcg_gen_ld_i64(t0, cpu_env, aofs + i);
+ tcg_gen_negsetcond_i64(cond, t0, t0, c);
+ tcg_gen_st_i64(t0, cpu_env, dofs + i);
+ }
+ tcg_temp_free_i64(t0);
+ } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
+ TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+ TCGv_i32 t1 = tcg_temp_ebb_new_i32();
+ uint32_t i;
+
+ tcg_gen_extrl_i64_i32(t1, c);
+ for (i = 0; i < oprsz; i += 8) {
+ tcg_gen_ld_i32(t0, cpu_env, aofs + i);
+ tcg_gen_negsetcond_i32(cond, t0, t0, t1);
+ tcg_gen_st_i32(t0, cpu_env, dofs + i);
+ }
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ } else {
+ gen_helper_gvec_2i * const *fn = fns[cond];
+ bool inv = false;
+
+ if (fn == NULL) {
+ cond = tcg_invert_cond(cond);
+ fn = fns[cond];
+ assert(fn != NULL);
+ inv = true;
+ }
+ tcg_gen_gvec_2i_ool(dofs, aofs, c, oprsz, maxsz, inv, fn[vece]);
+ return;
+ }
+
+ if (oprsz < maxsz) {
+ expand_clr(dofs + oprsz, maxsz - oprsz);
+ }
+}
+
+void tcg_gen_gvec_cmpi(TCGCond cond, unsigned vece, uint32_t dofs,
+ uint32_t aofs, int64_t c,
+ uint32_t oprsz, uint32_t maxsz)
+{
+ TCGv_i64 tmp = tcg_constant_i64(c);
+ tcg_gen_gvec_cmps(cond, vece, dofs, aofs, tmp, oprsz, maxsz);
+}
+
static void tcg_gen_bitsel_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c)
{
TCGv_i64 t = tcg_temp_ebb_new_i64();
--
2.34.1

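The DO_CMP1 helper bodies pack two tricks into one expression: the C comparison yields 0 or 1, the xor with inv flips the sense for conditions reached via tcg_invert_cond (GE/GT and their unsigned variants dispatch through the LT/LE tables), and the negation smears bit 0 into the all-zeros/all-ones lane mask that vector code expects. One lane in isolation (hypothetical helper, illustration only):

    #include <stdbool.h>
    #include <stdint.h>

    /* One 32-bit lane of gvec_lts32, generalized over 'inv'. */
    static uint32_t lt_mask32(int32_t a, int32_t b, bool inv)
    {
        /* (a < b) is 0 or 1; ^inv inverts; -x maps 1 to 0xFFFFFFFF. */
        return -(uint32_t)((a < b) ^ inv);
    }
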
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Song Gao <gaosong@loongson.cn>
Message-Id: <20230831030904.1194667-3-richard.henderson@linaro.org>
---
target/arm/tcg/translate.c | 56 ++++++--------------------------------
1 file changed, 9 insertions(+), 47 deletions(-)

diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
}

-#define GEN_CMP0(NAME, COND) \
- static void gen_##NAME##0_i32(TCGv_i32 d, TCGv_i32 a) \
- { \
- tcg_gen_negsetcond_i32(COND, d, a, tcg_constant_i32(0)); \
- } \
- static void gen_##NAME##0_i64(TCGv_i64 d, TCGv_i64 a) \
- { \
- tcg_gen_negsetcond_i64(COND, d, a, tcg_constant_i64(0)); \
- } \
- static void gen_##NAME##0_vec(unsigned vece, TCGv_vec d, TCGv_vec a) \
- { \
- TCGv_vec zero = tcg_constant_vec_matching(d, vece, 0); \
- tcg_gen_cmp_vec(COND, vece, d, a, zero); \
- } \
- void gen_gvec_##NAME##0(unsigned vece, uint32_t d, uint32_t m, \
- uint32_t opr_sz, uint32_t max_sz) \
- { \
- const GVecGen2 op[4] = { \
- { .fno = gen_helper_gvec_##NAME##0_b, \
- .fniv = gen_##NAME##0_vec, \
- .opt_opc = vecop_list_cmp, \
- .vece = MO_8 }, \
- { .fno = gen_helper_gvec_##NAME##0_h, \
- .fniv = gen_##NAME##0_vec, \
- .opt_opc = vecop_list_cmp, \
- .vece = MO_16 }, \
- { .fni4 = gen_##NAME##0_i32, \
- .fniv = gen_##NAME##0_vec, \
- .opt_opc = vecop_list_cmp, \
- .vece = MO_32 }, \
- { .fni8 = gen_##NAME##0_i64, \
- .fniv = gen_##NAME##0_vec, \
- .opt_opc = vecop_list_cmp, \
- .prefer_i64 = TCG_TARGET_REG_BITS == 64, \
- .vece = MO_64 }, \
- }; \
- tcg_gen_gvec_2(d, m, opr_sz, max_sz, &op[vece]); \
- }
+#define GEN_CMP0(NAME, COND) \
+ void NAME(unsigned vece, uint32_t d, uint32_t m, \
+ uint32_t opr_sz, uint32_t max_sz) \
+ { tcg_gen_gvec_cmpi(COND, vece, d, m, 0, opr_sz, max_sz); }

-static const TCGOpcode vecop_list_cmp[] = {
- INDEX_op_cmp_vec, 0
-};
-
-GEN_CMP0(ceq, TCG_COND_EQ)
-GEN_CMP0(cle, TCG_COND_LE)
-GEN_CMP0(cge, TCG_COND_GE)
-GEN_CMP0(clt, TCG_COND_LT)
-GEN_CMP0(cgt, TCG_COND_GT)
+GEN_CMP0(gen_gvec_ceq0, TCG_COND_EQ)
+GEN_CMP0(gen_gvec_cle0, TCG_COND_LE)
+GEN_CMP0(gen_gvec_cge0, TCG_COND_GE)
+GEN_CMP0(gen_gvec_clt0, TCG_COND_LT)
+GEN_CMP0(gen_gvec_cgt0, TCG_COND_GT)

#undef GEN_CMP0

--
2.34.1

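For example, the first instantiation now expands to nothing more than a thin wrapper over the new generic entry point (expansion shown for illustration; it assumes QEMU's tcg headers are in scope):

    void gen_gvec_ceq0(unsigned vece, uint32_t d, uint32_t m,
                       uint32_t opr_sz, uint32_t max_sz)
    {
        tcg_gen_gvec_cmpi(TCG_COND_EQ, vece, d, m, 0, opr_sz, max_sz);
    }
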
Now that we defer address space update and tlb_flush until
the next async_run_on_cpu, the plugin run at the end of the
instruction no longer has to contend with a flushed tlb.
Therefore, delete SavedIOTLB entirely.

Properly return false from tlb_plugin_lookup when we do
not have a tlb match.

Fixes a bug in which SavedIOTLB had stale data, because
there were multiple i/o accesses within a single insn.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/hw/core/cpu.h | 13 -------
include/qemu/typedefs.h | 1 -
accel/tcg/cputlb.c | 79 ++++++++++-----------------------
3 files changed, 23 insertions(+), 70 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ struct CPUWatchpoint {
QTAILQ_ENTRY(CPUWatchpoint) entry;
};

-#ifdef CONFIG_PLUGIN
-/*
- * For plugins we sometime need to save the resolved iotlb data before
- * the memory regions get moved around by io_writex.
- */
-typedef struct SavedIOTLB {
- MemoryRegionSection *section;
- hwaddr mr_offset;
-} SavedIOTLB;
-#endif
-
struct KVMState;
struct kvm_run;

@@ -XXX,XX +XXX,XX @@ struct CPUState {

#ifdef CONFIG_PLUGIN
GArray *plugin_mem_cbs;
- /* saved iotlb data from io_writex */
- SavedIOTLB saved_iotlb;
#endif

/* TODO Move common fields from CPUArchState here. */
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -XXX,XX +XXX,XX @@ typedef struct QString QString;
typedef struct RAMBlock RAMBlock;
typedef struct Range Range;
typedef struct ReservedRegion ReservedRegion;
-typedef struct SavedIOTLB SavedIOTLB;
typedef struct SHPCDevice SHPCDevice;
typedef struct SSIBus SSIBus;
typedef struct TCGHelperInfo TCGHelperInfo;
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
}
}

-/*
- * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
- * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
- * because of the side effect of io_writex changing memory layout.
- */
-static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
- hwaddr mr_offset)
-{
-#ifdef CONFIG_PLUGIN
- SavedIOTLB *saved = &cs->saved_iotlb;
- saved->section = section;
- saved->mr_offset = mr_offset;
-#endif
-}
-
static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
int mmu_idx, vaddr addr, uintptr_t retaddr,
MMUAccessType access_type, MemOp op)
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
cpu_io_recompile(cpu, retaddr);
}

- /*
- * The memory_region_dispatch may trigger a flush/resize
- * so for plugins we save the iotlb_data just in case.
- */
- save_iotlb_data(cpu, section, mr_offset);
-
{
QEMU_IOTHREAD_LOCK_GUARD();
r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
}
cpu->mem_io_pc = retaddr;

- /*
- * The memory_region_dispatch may trigger a flush/resize
- * so for plugins we save the iotlb_data just in case.
- */
- save_iotlb_data(cpu, section, mr_offset);
-
{
QEMU_IOTHREAD_LOCK_GUARD();
r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
 * in the softmmu lookup code (or helper). We don't handle re-fills or
 * checking the victim table. This is purely informational.
 *
- * This almost never fails as the memory access being instrumented
- * should have just filled the TLB. The one corner case is io_writex
- * which can cause TLB flushes and potential resizing of the TLBs
- * losing the information we need. In those cases we need to recover
- * data from a copy of the CPUTLBEntryFull. As long as this always occurs
- * from the same thread (which a mem callback will be) this is safe.
+ * The one corner case is i/o write, which can cause changes to the
+ * address space. Those changes, and the corresponding tlb flush,
+ * should be delayed until the next TB, so even then this ought not fail.
+ * But check, Just in Case.
 */
-
bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
bool is_store, struct qemu_plugin_hwaddr *data)
{
CPUArchState *env = cpu->env_ptr;
CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
uintptr_t index = tlb_index(env, mmu_idx, addr);
- uint64_t tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
+ MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);

- if (likely(tlb_hit(tlb_addr, addr))) {
- /* We must have an iotlb entry for MMIO */
- if (tlb_addr & TLB_MMIO) {
- CPUTLBEntryFull *full;
- full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
- data->is_io = true;
- data->v.io.section =
- iotlb_to_section(cpu, full->xlat_section, full->attrs);
- data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
- } else {
- data->is_io = false;
- data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
- }
- return true;
- } else {
- SavedIOTLB *saved = &cpu->saved_iotlb;
- data->is_io = true;
- data->v.io.section = saved->section;
- data->v.io.offset = saved->mr_offset;
- return true;
+ if (unlikely(!tlb_hit(tlb_addr, addr))) {
+ return false;
}
-}

+ /* We must have an iotlb entry for MMIO */
+ if (tlb_addr & TLB_MMIO) {
+ CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ hwaddr xlat = full->xlat_section;
+
+ data->is_io = true;
+ data->v.io.offset = (xlat & TARGET_PAGE_MASK) + addr;
+ data->v.io.section =
+ iotlb_to_section(cpu, xlat & ~TARGET_PAGE_MASK, full->attrs);
+ } else {
+ data->is_io = false;
+ data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
+ }
+ return true;
+}
+
#endif

/*
--
2.34.1

1
While there are no target-specific nonfaulting probes,
1
These are common code from io_readx and io_writex.
2
generic code may grow some uses at some point.
3
2
4
Note that the attrs argument was incorrect -- it should have
3
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
been MEMTXATTRS_UNSPECIFIED. Just use the simpler interface.
6
7
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
5
---
10
target/avr/helper.c | 46 ++++++++++++++++++++++++++++-----------------
6
accel/tcg/cputlb.c | 77 +++++++++++++++++++++++++++-------------------
11
1 file changed, 29 insertions(+), 17 deletions(-)
7
1 file changed, 45 insertions(+), 32 deletions(-)
12
8
13
diff --git a/target/avr/helper.c b/target/avr/helper.c
9
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
14
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
15
--- a/target/avr/helper.c
11
--- a/accel/tcg/cputlb.c
16
+++ b/target/avr/helper.c
12
+++ b/accel/tcg/cputlb.c
17
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
13
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
18
MMUAccessType access_type, int mmu_idx,
14
* (non-page-aligned) vaddr of the eventual memory access to get
19
bool probe, uintptr_t retaddr)
15
* the MemoryRegion offset for the access. Note that the vaddr we
16
* subtract here is that of the page base, and not the same as the
17
- * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
18
+ * vaddr we add back in io_prepare()/get_page_addr_code().
19
*/
20
desc->fulltlb[index] = *full;
21
full = &desc->fulltlb[index];
22
@@ -XXX,XX +XXX,XX @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
23
}
24
}
25
26
-static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
27
- int mmu_idx, vaddr addr, uintptr_t retaddr,
28
- MMUAccessType access_type, MemOp op)
29
+static MemoryRegionSection *
30
+io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
31
+ MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
20
{
32
{
21
- int prot = 0;
33
CPUState *cpu = env_cpu(env);
22
- MemTxAttrs attrs = {};
34
- hwaddr mr_offset;
23
+ int prot, page_size = TARGET_PAGE_SIZE;
35
MemoryRegionSection *section;
24
uint32_t paddr;
36
- MemoryRegion *mr;
25
37
- uint64_t val;
26
address &= TARGET_PAGE_MASK;
38
- MemTxResult r;
27
39
+ hwaddr mr_offset;
28
if (mmu_idx == MMU_CODE_IDX) {
40
29
- /* access to code in flash */
41
- section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
30
+ /* Access to code in flash. */
42
- mr = section->mr;
31
paddr = OFFSET_CODE + address;
43
- mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
32
prot = PAGE_READ | PAGE_EXEC;
44
+ section = iotlb_to_section(cpu, xlat, attrs);
33
- if (paddr + TARGET_PAGE_SIZE > OFFSET_DATA) {
45
+ mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
34
+ if (paddr >= OFFSET_DATA) {
46
cpu->mem_io_pc = retaddr;
35
+ /*
47
if (!cpu->can_do_io) {
36
+ * This should not be possible via any architectural operations.
48
cpu_io_recompile(cpu, retaddr);
37
+ * There is certainly not an exception that we can deliver.
38
+ * Accept probing that might come from generic code.
39
+ */
40
+ if (probe) {
41
+ return false;
42
+ }
43
error_report("execution left flash memory");
44
abort();
45
}
46
- } else if (address < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
47
- /*
48
- * access to CPU registers, exit and rebuilt this TB to use full access
49
- * incase it touches specially handled registers like SREG or SP
50
- */
51
- AVRCPU *cpu = AVR_CPU(cs);
52
- CPUAVRState *env = &cpu->env;
53
- env->fullacc = 1;
54
- cpu_loop_exit_restore(cs, retaddr);
55
} else {
56
- /* access to memory. nothing special */
57
+ /* Access to memory. */
58
paddr = OFFSET_DATA + address;
59
prot = PAGE_READ | PAGE_WRITE;
60
+ if (address < NUMBER_OF_CPU_REGISTERS + NUMBER_OF_IO_REGISTERS) {
61
+ /*
62
+ * Access to CPU registers, exit and rebuilt this TB to use
63
+ * full access in case it touches specially handled registers
64
+ * like SREG or SP. For probing, set page_size = 1, in order
65
+ * to force tlb_fill to be called for the next access.
66
+ */
67
+ if (probe) {
68
+ page_size = 1;
69
+ } else {
70
+ AVRCPU *cpu = AVR_CPU(cs);
71
+ CPUAVRState *env = &cpu->env;
72
+ env->fullacc = 1;
73
+ cpu_loop_exit_restore(cs, retaddr);
74
+ }
75
+ }
76
}
49
}
77
50
78
- tlb_set_page_with_attrs(cs, address, paddr, attrs, prot,
51
+ *out_offset = mr_offset;
79
- mmu_idx, TARGET_PAGE_SIZE);
52
+ return section;
53
+}
54
+
55
+static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
56
+ unsigned size, MMUAccessType access_type, int mmu_idx,
57
+ MemTxResult response, uintptr_t retaddr,
58
+ MemoryRegionSection *section, hwaddr mr_offset)
59
+{
60
+ hwaddr physaddr = (mr_offset +
61
+ section->offset_within_address_space -
62
+ section->offset_within_region);
63
+
64
+ cpu_transaction_failed(env_cpu(env), physaddr, addr, size, access_type,
65
+ mmu_idx, full->attrs, response, retaddr);
66
+}
67
+
68
+static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
69
+ int mmu_idx, vaddr addr, uintptr_t retaddr,
70
+ MMUAccessType access_type, MemOp op)
71
+{
72
+ MemoryRegionSection *section;
73
+ hwaddr mr_offset;
74
+ MemoryRegion *mr;
75
+ MemTxResult r;
76
+ uint64_t val;
77
+
78
+ section = io_prepare(&mr_offset, env, full->xlat_section,
79
+ full->attrs, addr, retaddr);
80
+ mr = section->mr;
81
+
82
{
83
QEMU_IOTHREAD_LOCK_GUARD();
84
r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
85
}
86
87
if (r != MEMTX_OK) {
88
- hwaddr physaddr = mr_offset +
89
- section->offset_within_address_space -
90
- section->offset_within_region;
80
-
91
-
81
+ tlb_set_page(cs, address, paddr, prot, mmu_idx, page_size);
92
- cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
82
return true;
93
- mmu_idx, full->attrs, r, retaddr);
94
+ io_failed(env, full, addr, memop_size(op), access_type, mmu_idx,
95
+ r, retaddr, section, mr_offset);
96
}
97
return val;
83
}
98
}
99
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
100
int mmu_idx, uint64_t val, vaddr addr,
101
uintptr_t retaddr, MemOp op)
102
{
103
- CPUState *cpu = env_cpu(env);
104
- hwaddr mr_offset;
105
MemoryRegionSection *section;
106
+ hwaddr mr_offset;
107
MemoryRegion *mr;
108
MemTxResult r;
109
110
- section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
111
+ section = io_prepare(&mr_offset, env, full->xlat_section,
112
+ full->attrs, addr, retaddr);
113
mr = section->mr;
114
- mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
115
- if (!cpu->can_do_io) {
116
- cpu_io_recompile(cpu, retaddr);
117
- }
118
- cpu->mem_io_pc = retaddr;
119
120
{
121
QEMU_IOTHREAD_LOCK_GUARD();
122
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
123
}
124
125
if (r != MEMTX_OK) {
126
- hwaddr physaddr = mr_offset +
127
- section->offset_within_address_space -
128
- section->offset_within_region;
129
-
130
- cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
131
- MMU_DATA_STORE, mmu_idx, full->attrs, r,
132
- retaddr);
133
+ io_failed(env, full, addr, memop_size(op), MMU_DATA_STORE, mmu_idx,
134
+ r, retaddr, section, mr_offset);
135
}
136
}
84
137
85
--
138
--
86
2.34.1
139
2.34.1
87
140
88
141
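
A minimal toy model of the probing trick in the patch above -- not QEMU's real TLB code; the type and function names here are made up. The only point is that an entry installed with a sub-page size cannot satisfy the next lookup, so tlb_fill runs again and the slow path can handle SREG/SP:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 0x1000u
#define PAGE_MASK (~(PAGE_SIZE - 1))

typedef struct {
    uint32_t tag;
    int full_page;               /* does the entry cover a whole page? */
} ToyTlbEntry;

static void toy_set_page(ToyTlbEntry *e, uint32_t addr, uint32_t page_size)
{
    e->tag = addr & PAGE_MASK;
    e->full_page = (page_size >= PAGE_SIZE);
}

static int toy_hit(const ToyTlbEntry *e, uint32_t addr)
{
    return e->full_page && e->tag == (addr & PAGE_MASK);
}

int main(void)
{
    ToyTlbEntry e;

    toy_set_page(&e, 0x3d, 1);           /* probe path: page_size = 1 */
    assert(!toy_hit(&e, 0x3d));          /* next access re-enters tlb_fill */

    toy_set_page(&e, 0x2000, PAGE_SIZE); /* plain memory */
    assert(toy_hit(&e, 0x2abc));         /* hits anywhere in that page */
    return 0;
}
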
1
We cannot deliver two interrupts simultaneously;
1
Since the introduction of CPUTLBEntryFull, we can recover
2
the first interrupt handler must execute first.
2
the full cpu address space physical address without having
3
to examine the MemoryRegionSection.
3
4
4
Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
5
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
target/avr/helper.c | 9 +++------
8
accel/tcg/cputlb.c | 12 ++++--------
9
1 file changed, 3 insertions(+), 6 deletions(-)
9
1 file changed, 4 insertions(+), 8 deletions(-)
10
10
11
diff --git a/target/avr/helper.c b/target/avr/helper.c
11
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
12
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/avr/helper.c
13
--- a/accel/tcg/cputlb.c
14
+++ b/target/avr/helper.c
14
+++ b/accel/tcg/cputlb.c
15
@@ -XXX,XX +XXX,XX @@
15
@@ -XXX,XX +XXX,XX @@ io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
16
16
17
bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
17
static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
18
unsigned size, MMUAccessType access_type, int mmu_idx,
19
- MemTxResult response, uintptr_t retaddr,
20
- MemoryRegionSection *section, hwaddr mr_offset)
21
+ MemTxResult response, uintptr_t retaddr)
18
{
22
{
19
- bool ret = false;
23
- hwaddr physaddr = (mr_offset +
20
AVRCPU *cpu = AVR_CPU(cs);
24
- section->offset_within_address_space -
21
CPUAVRState *env = &cpu->env;
25
- section->offset_within_region);
22
23
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
24
avr_cpu_do_interrupt(cs);
25
26
cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
27
-
26
-
28
- ret = true;
27
+ hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
29
+ return true;
28
cpu_transaction_failed(env_cpu(env), physaddr, addr, size, access_type,
30
}
29
mmu_idx, full->attrs, response, retaddr);
30
}
31
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
32
33
if (r != MEMTX_OK) {
34
io_failed(env, full, addr, memop_size(op), access_type, mmu_idx,
35
- r, retaddr, section, mr_offset);
36
+ r, retaddr);
31
}
37
}
32
if (interrupt_request & CPU_INTERRUPT_HARD) {
38
return val;
33
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
39
}
34
if (!env->intsrc) {
40
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
35
cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
41
36
}
42
if (r != MEMTX_OK) {
37
-
43
io_failed(env, full, addr, memop_size(op), MMU_DATA_STORE, mmu_idx,
38
- ret = true;
44
- r, retaddr, section, mr_offset);
39
+ return true;
45
+ r, retaddr);
40
}
41
}
46
}
42
- return ret;
43
+ return false;
44
}
47
}
45
48
46
void avr_cpu_do_interrupt(CPUState *cs)
47
--
49
--
48
2.34.1
50
2.34.1
49
51
50
52
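
A standalone arithmetic check (illustrative numbers, not QEMU code) of the recovery used in the patch above: CPUTLBEntryFull keeps the page-aligned physical address, and OR-ing in the offset-in-page bits of the virtual address reconstructs the full physical address without consulting the MemoryRegionSection:

#include <assert.h>
#include <stdint.h>

#define TARGET_PAGE_MASK (~(uint64_t)0xfff)  /* assuming 4 KiB pages */

int main(void)
{
    uint64_t phys_page = 0x40008000ull & TARGET_PAGE_MASK; /* full->phys_addr */
    uint64_t addr      = 0x7f001234ull;                    /* guest vaddr */
    uint64_t physaddr  = phys_page | (addr & ~TARGET_PAGE_MASK);

    assert(physaddr == 0x40008234ull);
    return 0;
}
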
1
We're about to start validating PAGE_EXEC, which means
1
Rather than saving MemoryRegionSection and offset,
2
that we've got to put this code into a section that is
2
save phys_addr and MemoryRegion. This matches up
3
both writable and executable.
3
much closer with the plugin api.
4
4
5
Note that this test did not run on hardware beforehand either.
5
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
6
7
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
8
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
7
---
11
tests/tcg/i386/test-i386.c | 2 +-
8
include/qemu/plugin-memory.h | 11 ++---------
12
1 file changed, 1 insertion(+), 1 deletion(-)
9
accel/tcg/cputlb.c | 16 +++++++++-------
10
plugins/api.c | 27 ++++++---------------------
11
3 files changed, 17 insertions(+), 37 deletions(-)
13
12
14
diff --git a/tests/tcg/i386/test-i386.c b/tests/tcg/i386/test-i386.c
13
diff --git a/include/qemu/plugin-memory.h b/include/qemu/plugin-memory.h
15
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
16
--- a/tests/tcg/i386/test-i386.c
15
--- a/include/qemu/plugin-memory.h
17
+++ b/tests/tcg/i386/test-i386.c
16
+++ b/include/qemu/plugin-memory.h
18
@@ -XXX,XX +XXX,XX @@ uint8_t code[] = {
17
@@ -XXX,XX +XXX,XX @@
19
0xc3, /* ret */
18
struct qemu_plugin_hwaddr {
19
bool is_io;
20
bool is_store;
21
- union {
22
- struct {
23
- MemoryRegionSection *section;
24
- hwaddr offset;
25
- } io;
26
- struct {
27
- void *hostaddr;
28
- } ram;
29
- } v;
30
+ hwaddr phys_addr;
31
+ MemoryRegion *mr;
20
};
32
};
21
33
22
-asm(".section \".data\"\n"
34
/**
23
+asm(".section \".data_x\",\"awx\"\n"
35
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
24
"smc_code2:\n"
36
index XXXXXXX..XXXXXXX 100644
25
"movl 4(%esp), %eax\n"
37
--- a/accel/tcg/cputlb.c
26
"movl %eax, smc_patch_addr2 + 1\n"
38
+++ b/accel/tcg/cputlb.c
39
@@ -XXX,XX +XXX,XX @@ bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
40
uintptr_t index = tlb_index(env, mmu_idx, addr);
41
MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
42
uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
43
+ CPUTLBEntryFull *full;
44
45
if (unlikely(!tlb_hit(tlb_addr, addr))) {
46
return false;
47
}
48
49
+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
50
+ data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
51
+
52
/* We must have an iotlb entry for MMIO */
53
if (tlb_addr & TLB_MMIO) {
54
- CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
55
- hwaddr xlat = full->xlat_section;
56
-
57
+ MemoryRegionSection *section =
58
+ iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK,
59
+ full->attrs);
60
data->is_io = true;
61
- data->v.io.offset = (xlat & TARGET_PAGE_MASK) + addr;
62
- data->v.io.section =
63
- iotlb_to_section(cpu, xlat & ~TARGET_PAGE_MASK, full->attrs);
64
+ data->mr = section->mr;
65
} else {
66
data->is_io = false;
67
- data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
68
+ data->mr = NULL;
69
}
70
return true;
71
}
72
diff --git a/plugins/api.c b/plugins/api.c
73
index XXXXXXX..XXXXXXX 100644
74
--- a/plugins/api.c
75
+++ b/plugins/api.c
76
@@ -XXX,XX +XXX,XX @@ uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr)
77
{
78
#ifdef CONFIG_SOFTMMU
79
if (haddr) {
80
- if (!haddr->is_io) {
81
- RAMBlock *block;
82
- ram_addr_t offset;
83
- void *hostaddr = haddr->v.ram.hostaddr;
84
-
85
- block = qemu_ram_block_from_host(hostaddr, false, &offset);
86
- if (!block) {
87
- error_report("Bad host ram pointer %p", haddr->v.ram.hostaddr);
88
- abort();
89
- }
90
-
91
- return block->offset + offset + block->mr->addr;
92
- } else {
93
- MemoryRegionSection *mrs = haddr->v.io.section;
94
- return mrs->offset_within_address_space + haddr->v.io.offset;
95
- }
96
+ return haddr->phys_addr;
97
}
98
#endif
99
return 0;
100
@@ -XXX,XX +XXX,XX @@ const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h)
101
{
102
#ifdef CONFIG_SOFTMMU
103
if (h && h->is_io) {
104
- MemoryRegionSection *mrs = h->v.io.section;
105
- if (!mrs->mr->name) {
106
- unsigned long maddr = 0xffffffff & (uintptr_t) mrs->mr;
107
- g_autofree char *temp = g_strdup_printf("anon%08lx", maddr);
108
+ MemoryRegion *mr = h->mr;
109
+ if (!mr->name) {
110
+ unsigned maddr = (uintptr_t)mr;
111
+ g_autofree char *temp = g_strdup_printf("anon%08x", maddr);
112
return g_intern_string(temp);
113
} else {
114
- return g_intern_string(mrs->mr->name);
115
+ return g_intern_string(mr->name);
116
}
117
} else {
118
return g_intern_static_string("RAM");
27
--
119
--
28
2.34.1
120
2.34.1
121
122
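
A minimal sketch of the section trick used for smc_code2 above, assuming GCC on an ELF/x86 target; patchable_fn is a made-up symbol. The "awx" flags mark the section allocatable, writable and executable, so a self-modifying-code test can both patch and run its code once PAGE_EXEC is enforced:

extern int patchable_fn(void);

asm(".section \".data_x\",\"awx\"\n"
    "patchable_fn:\n"
    "    movl $1, %eax\n"   /* an SMC test would overwrite this immediate */
    "    ret\n"
    ".previous");

int main(void)
{
    return patchable_fn() == 1 ? 0 : 1;  /* exit 0 when the stub ran */
}
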
1
From: Ilya Leoshkevich <iii@linux.ibm.com>
1
Push computation down into the if statements to the point where
2
the data is used.
2
3
3
Right now the translator stops right *after* the end of a page, which
4
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
breaks reporting of fault locations when the last instruction of a
5
multi-insn translation block crosses a page boundary.
6
7
An implementation, like the one arm and s390x have, would require an
8
i386 length disassembler, which is burdensome to maintain. Another
9
alternative would be to single-step at the end of a guest page, but
10
this may come with a performance impact.
11
12
Fix by snapshotting disassembly state and restoring it after we figure
13
out we crossed a page boundary. This includes rolling back cc_op
14
updates and emitted ops.
15
16
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
17
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
18
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1143
19
Message-Id: <20220817150506.592862-4-iii@linux.ibm.com>
20
[rth: Simplify end-of-insn cross-page checks.]
21
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
22
---
6
---
23
target/i386/tcg/translate.c | 64 ++++++++++++++++-----------
7
accel/tcg/cputlb.c | 33 +++++++++++++--------------------
24
tests/tcg/x86_64/noexec.c | 75 ++++++++++++++++++++++++++++++++
8
1 file changed, 13 insertions(+), 20 deletions(-)
25
tests/tcg/x86_64/Makefile.target | 3 +-
26
3 files changed, 116 insertions(+), 26 deletions(-)
27
create mode 100644 tests/tcg/x86_64/noexec.c
28
9
29
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
10
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
30
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
31
--- a/target/i386/tcg/translate.c
12
--- a/accel/tcg/cputlb.c
32
+++ b/target/i386/tcg/translate.c
13
+++ b/accel/tcg/cputlb.c
33
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
14
@@ -XXX,XX +XXX,XX @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
34
TCGv_i64 tmp1_i64;
15
mmu_idx, retaddr);
35
16
}
36
sigjmp_buf jmpbuf;
17
37
+ TCGOp *prev_insn_end;
18
-static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
38
} DisasContext;
19
- vaddr addr, unsigned size,
39
20
- MMUAccessType access_type,
40
/* The environment in which user-only runs is constrained. */
21
- int mmu_idx, MemTxAttrs attrs,
41
@@ -XXX,XX +XXX,XX @@ static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
22
- MemTxResult response,
23
- uintptr_t retaddr)
24
-{
25
- CPUClass *cc = CPU_GET_CLASS(cpu);
26
-
27
- if (!cpu->ignore_memory_transaction_failures &&
28
- cc->tcg_ops->do_transaction_failed) {
29
- cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
30
- access_type, mmu_idx, attrs,
31
- response, retaddr);
32
- }
33
-}
34
-
35
static MemoryRegionSection *
36
io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
37
MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
38
@@ -XXX,XX +XXX,XX @@ static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
39
unsigned size, MMUAccessType access_type, int mmu_idx,
40
MemTxResult response, uintptr_t retaddr)
42
{
41
{
43
uint64_t pc = s->pc;
42
- hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
44
43
- cpu_transaction_failed(env_cpu(env), physaddr, addr, size, access_type,
45
+ /* This is a subsequent insn that crosses a page boundary. */
44
- mmu_idx, full->attrs, response, retaddr);
46
+ if (s->base.num_insns > 1 &&
45
+ CPUState *cpu = env_cpu(env);
47
+ !is_same_page(&s->base, s->pc + num_bytes - 1)) {
48
+ siglongjmp(s->jmpbuf, 2);
49
+ }
50
+
46
+
51
s->pc += num_bytes;
47
+ if (!cpu->ignore_memory_transaction_failures) {
52
if (unlikely(s->pc - s->pc_start > X86_MAX_INSN_LENGTH)) {
48
+ CPUClass *cc = CPU_GET_CLASS(cpu);
53
/* If the instruction's 16th byte is on a different page than the 1st, a
54
@@ -XXX,XX +XXX,XX @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
55
int modrm, reg, rm, mod, op, opreg, val;
56
target_ulong next_eip, tval;
57
target_ulong pc_start = s->base.pc_next;
58
+ bool orig_cc_op_dirty = s->cc_op_dirty;
59
+ CCOp orig_cc_op = s->cc_op;
60
61
s->pc_start = s->pc = pc_start;
62
s->override = -1;
63
@@ -XXX,XX +XXX,XX @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
64
s->rip_offset = 0; /* for relative ip address */
65
s->vex_l = 0;
66
s->vex_v = 0;
67
- if (sigsetjmp(s->jmpbuf, 0) != 0) {
68
+ switch (sigsetjmp(s->jmpbuf, 0)) {
69
+ case 0:
70
+ break;
71
+ case 1:
72
gen_exception_gpf(s);
73
return s->pc;
74
+ case 2:
75
+ /* Restore state that may affect the next instruction. */
76
+ s->cc_op_dirty = orig_cc_op_dirty;
77
+ s->cc_op = orig_cc_op;
78
+ s->base.num_insns--;
79
+ tcg_remove_ops_after(s->prev_insn_end);
80
+ s->base.is_jmp = DISAS_TOO_MANY;
81
+ return pc_start;
82
+ default:
83
+ g_assert_not_reached();
84
}
85
86
prefixes = 0;
87
@@ -XXX,XX +XXX,XX @@ static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
88
{
89
DisasContext *dc = container_of(dcbase, DisasContext, base);
90
91
+ dc->prev_insn_end = tcg_last_op();
92
tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
93
}
94
95
@@ -XXX,XX +XXX,XX @@ static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
96
#endif
97
98
pc_next = disas_insn(dc, cpu);
99
-
100
- if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
101
- /* if single step mode, we generate only one instruction and
102
- generate an exception */
103
- /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
104
- the flag and abort the translation to give the irqs a
105
- chance to happen */
106
- dc->base.is_jmp = DISAS_TOO_MANY;
107
- } else if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
108
- && ((pc_next & TARGET_PAGE_MASK)
109
- != ((pc_next + TARGET_MAX_INSN_SIZE - 1)
110
- & TARGET_PAGE_MASK)
111
- || (pc_next & ~TARGET_PAGE_MASK) == 0)) {
112
- /* Do not cross the boundary of the pages in icount mode,
113
- it can cause an exception. Do it only when boundary is
114
- crossed by the first instruction in the block.
115
- If current instruction already crossed the bound - it's ok,
116
- because an exception hasn't stopped this code.
117
- */
118
- dc->base.is_jmp = DISAS_TOO_MANY;
119
- } else if ((pc_next - dc->base.pc_first) >= (TARGET_PAGE_SIZE - 32)) {
120
- dc->base.is_jmp = DISAS_TOO_MANY;
121
- }
122
-
123
dc->base.pc_next = pc_next;
124
+
49
+
125
+ if (dc->base.is_jmp == DISAS_NEXT) {
50
+ if (cc->tcg_ops->do_transaction_failed) {
126
+ if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
51
+ hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
127
+ /*
52
+
128
+ * If single step mode, we generate only one instruction and
53
+ cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
129
+ * generate an exception.
54
+ access_type, mmu_idx,
130
+ * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
55
+ full->attrs, response, retaddr);
131
+ * the flag and abort the translation to give the irqs a
132
+ * chance to happen.
133
+ */
134
+ dc->base.is_jmp = DISAS_TOO_MANY;
135
+ } else if (!is_same_page(&dc->base, pc_next)) {
136
+ dc->base.is_jmp = DISAS_TOO_MANY;
137
+ }
56
+ }
138
+ }
57
+ }
139
}
58
}
140
59
141
static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
60
static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
142
diff --git a/tests/tcg/x86_64/noexec.c b/tests/tcg/x86_64/noexec.c
143
new file mode 100644
144
index XXXXXXX..XXXXXXX
145
--- /dev/null
146
+++ b/tests/tcg/x86_64/noexec.c
147
@@ -XXX,XX +XXX,XX @@
148
+#include "../multiarch/noexec.c.inc"
149
+
150
+static void *arch_mcontext_pc(const mcontext_t *ctx)
151
+{
152
+ return (void *)ctx->gregs[REG_RIP];
153
+}
154
+
155
+int arch_mcontext_arg(const mcontext_t *ctx)
156
+{
157
+ return ctx->gregs[REG_RDI];
158
+}
159
+
160
+static void arch_flush(void *p, int len)
161
+{
162
+}
163
+
164
+extern char noexec_1[];
165
+extern char noexec_2[];
166
+extern char noexec_end[];
167
+
168
+asm("noexec_1:\n"
169
+ " movq $1,%rdi\n" /* %rdi is 0 on entry, set 1. */
170
+ "noexec_2:\n"
171
+ " movq $2,%rdi\n" /* %rdi is 0/1; set 2. */
172
+ " ret\n"
173
+ "noexec_end:");
174
+
175
+int main(void)
176
+{
177
+ struct noexec_test noexec_tests[] = {
178
+ {
179
+ .name = "fallthrough",
180
+ .test_code = noexec_1,
181
+ .test_len = noexec_end - noexec_1,
182
+ .page_ofs = noexec_1 - noexec_2,
183
+ .entry_ofs = noexec_1 - noexec_2,
184
+ .expected_si_ofs = 0,
185
+ .expected_pc_ofs = 0,
186
+ .expected_arg = 1,
187
+ },
188
+ {
189
+ .name = "jump",
190
+ .test_code = noexec_1,
191
+ .test_len = noexec_end - noexec_1,
192
+ .page_ofs = noexec_1 - noexec_2,
193
+ .entry_ofs = 0,
194
+ .expected_si_ofs = 0,
195
+ .expected_pc_ofs = 0,
196
+ .expected_arg = 0,
197
+ },
198
+ {
199
+ .name = "fallthrough [cross]",
200
+ .test_code = noexec_1,
201
+ .test_len = noexec_end - noexec_1,
202
+ .page_ofs = noexec_1 - noexec_2 - 2,
203
+ .entry_ofs = noexec_1 - noexec_2 - 2,
204
+ .expected_si_ofs = 0,
205
+ .expected_pc_ofs = -2,
206
+ .expected_arg = 1,
207
+ },
208
+ {
209
+ .name = "jump [cross]",
210
+ .test_code = noexec_1,
211
+ .test_len = noexec_end - noexec_1,
212
+ .page_ofs = noexec_1 - noexec_2 - 2,
213
+ .entry_ofs = -2,
214
+ .expected_si_ofs = 0,
215
+ .expected_pc_ofs = -2,
216
+ .expected_arg = 0,
217
+ },
218
+ };
219
+
220
+ return test_noexec(noexec_tests,
221
+ sizeof(noexec_tests) / sizeof(noexec_tests[0]));
222
+}
223
diff --git a/tests/tcg/x86_64/Makefile.target b/tests/tcg/x86_64/Makefile.target
224
index XXXXXXX..XXXXXXX 100644
225
--- a/tests/tcg/x86_64/Makefile.target
226
+++ b/tests/tcg/x86_64/Makefile.target
227
@@ -XXX,XX +XXX,XX @@ include $(SRC_PATH)/tests/tcg/i386/Makefile.target
228
229
ifeq ($(filter %-linux-user, $(TARGET)),$(TARGET))
230
X86_64_TESTS += vsyscall
231
+X86_64_TESTS += noexec
232
TESTS=$(MULTIARCH_TESTS) $(X86_64_TESTS) test-x86_64
233
else
234
TESTS=$(MULTIARCH_TESTS)
235
@@ -XXX,XX +XXX,XX @@ test-x86_64: LDFLAGS+=-lm -lc
236
test-x86_64: test-i386.c test-i386.h test-i386-shift.h test-i386-muldiv.h
237
    $(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
238
239
-vsyscall: $(SRC_PATH)/tests/tcg/x86_64/vsyscall.c
240
+%: $(SRC_PATH)/tests/tcg/x86_64/%.c
241
    $(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
242
--
61
--
243
2.34.1
62
2.34.1
63
64
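
A hedged sketch of the snapshot-and-roll-back pattern the i386 patch above describes, with a plain counter standing in for cc_op and the emitted TCG ops -- this is not the translator code itself, only its control flow:

#include <setjmp.h>
#include <stdio.h>

static sigjmp_buf jmpbuf;

static void decode_insn(int crosses_page)
{
    if (crosses_page) {
        siglongjmp(jmpbuf, 2);      /* abandon the current instruction */
    }
    puts("instruction translated");
}

int main(void)
{
    int saved_state = 7;            /* snapshot taken before decoding */
    volatile int state = saved_state;

    switch (sigsetjmp(jmpbuf, 0)) {
    case 0:
        state++;                    /* speculative update during decode */
        decode_insn(1);
        break;
    case 2:
        state = saved_state;        /* roll back and end the TB here */
        printf("rolled back to %d\n", state);
        break;
    }
    return 0;
}
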
1
From: Ilya Leoshkevich <iii@linux.ibm.com>
2
3
Currently it's possible to execute pages that do not have PAGE_EXEC
4
if there is an existing translation block. Fix by invalidating TBs
5
that touch the affected pages.
6
7
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
8
Message-Id: <20220817150506.592862-2-iii@linux.ibm.com>
9
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
1
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
---
2
---
11
linux-user/mmap.c | 6 ++++--
3
accel/tcg/cputlb.c | 6 ++++--
12
1 file changed, 4 insertions(+), 2 deletions(-)
4
1 file changed, 4 insertions(+), 2 deletions(-)
13
5
14
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
6
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
15
index XXXXXXX..XXXXXXX 100644
7
index XXXXXXX..XXXXXXX 100644
16
--- a/linux-user/mmap.c
8
--- a/accel/tcg/cputlb.c
17
+++ b/linux-user/mmap.c
9
+++ b/accel/tcg/cputlb.c
18
@@ -XXX,XX +XXX,XX @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
10
@@ -XXX,XX +XXX,XX @@ static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
19
goto error;
11
MMUAccessType type, uintptr_t ra)
20
}
12
{
13
if (unlikely(p->flags & TLB_MMIO)) {
14
- return io_readx(env, p->full, mmu_idx, p->addr, ra, type, MO_UB);
15
+ QEMU_IOTHREAD_LOCK_GUARD();
16
+ return do_ld_mmio_beN(env, p->full, 0, p->addr, 1, mmu_idx, type, ra);
17
} else {
18
return *(uint8_t *)p->haddr;
21
}
19
}
22
+
20
@@ -XXX,XX +XXX,XX @@ static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val,
23
page_set_flags(start, start + len, page_flags);
21
int mmu_idx, uintptr_t ra)
24
- mmap_unlock();
22
{
25
- return 0;
23
if (unlikely(p->flags & TLB_MMIO)) {
26
+ tb_invalidate_phys_range(start, start + len);
24
- io_writex(env, p->full, mmu_idx, val, p->addr, ra, MO_UB);
27
+ ret = 0;
25
+ QEMU_IOTHREAD_LOCK_GUARD();
28
+
26
+ do_st_mmio_leN(env, p->full, val, p->addr, 1, mmu_idx, ra);
29
error:
27
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
30
mmap_unlock();
28
/* nothing */
31
return ret;
29
} else {
32
--
30
--
33
2.34.1
31
2.34.1
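
A user-space illustration (Linux on x86 assumed; error handling trimmed) of the invariant the mprotect() fix above restores: once PROT_EXEC is dropped, code in that page must no longer run, even if a translation block for it already existed:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    unsigned char ret_insn = 0xc3;  /* x86 'ret' */
    void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (page == MAP_FAILED) {
        return 1;
    }
    memcpy(page, &ret_insn, 1);
    ((void (*)(void))page)();       /* runs while the page is executable */
    puts("executed with PROT_EXEC set");

    mprotect(page, 4096, PROT_READ);
    /* Jumping into 'page' now must SIGSEGV; stale TBs get invalidated. */
    return 0;
}
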
1
The base qemu_ram_addr_from_host function is already in
1
Avoid multiple calls to io_prepare for unaligned accesses.
2
softmmu/physmem.c; move the nofail version to be adjacent.
2
One call to do_ld_mmio_beN will never cross pages.
3
3
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
6
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
7
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
8
---
5
---
9
include/exec/cpu-common.h | 1 +
6
accel/tcg/cputlb.c | 84 +++++++++++++++++-----------------------------
10
accel/tcg/cputlb.c | 12 ------------
7
1 file changed, 30 insertions(+), 54 deletions(-)
11
softmmu/physmem.c | 12 ++++++++++++
12
3 files changed, 13 insertions(+), 12 deletions(-)
13
8
14
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/include/exec/cpu-common.h
17
+++ b/include/exec/cpu-common.h
18
@@ -XXX,XX +XXX,XX @@ typedef uintptr_t ram_addr_t;
19
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
20
/* This should not be used by devices. */
21
ram_addr_t qemu_ram_addr_from_host(void *ptr);
22
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
23
RAMBlock *qemu_ram_block_by_name(const char *name);
24
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
25
ram_addr_t *offset);
26
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
9
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
27
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
28
--- a/accel/tcg/cputlb.c
11
--- a/accel/tcg/cputlb.c
29
+++ b/accel/tcg/cputlb.c
12
+++ b/accel/tcg/cputlb.c
30
@@ -XXX,XX +XXX,XX @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
13
@@ -XXX,XX +XXX,XX @@ static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
31
prot, mmu_idx, size);
14
}
32
}
15
}
33
16
34
-static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
17
-static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
18
- int mmu_idx, vaddr addr, uintptr_t retaddr,
19
- MMUAccessType access_type, MemOp op)
35
-{
20
-{
36
- ram_addr_t ram_addr;
21
- MemoryRegionSection *section;
22
- hwaddr mr_offset;
23
- MemoryRegion *mr;
24
- MemTxResult r;
25
- uint64_t val;
37
-
26
-
38
- ram_addr = qemu_ram_addr_from_host(ptr);
27
- section = io_prepare(&mr_offset, env, full->xlat_section,
39
- if (ram_addr == RAM_ADDR_INVALID) {
28
- full->attrs, addr, retaddr);
40
- error_report("Bad ram pointer %p", ptr);
29
- mr = section->mr;
41
- abort();
30
-
31
- {
32
- QEMU_IOTHREAD_LOCK_GUARD();
33
- r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
42
- }
34
- }
43
- return ram_addr;
35
-
36
- if (r != MEMTX_OK) {
37
- io_failed(env, full, addr, memop_size(op), access_type, mmu_idx,
38
- r, retaddr);
39
- }
40
- return val;
44
-}
41
-}
45
-
42
-
46
/*
43
static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
47
* Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
44
int mmu_idx, uint64_t val, vaddr addr,
48
* caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
45
uintptr_t retaddr, MemOp op)
49
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
46
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
50
index XXXXXXX..XXXXXXX 100644
47
uint64_t ret_be, vaddr addr, int size,
51
--- a/softmmu/physmem.c
48
int mmu_idx, MMUAccessType type, uintptr_t ra)
52
+++ b/softmmu/physmem.c
49
{
53
@@ -XXX,XX +XXX,XX @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
50
- uint64_t t;
54
return block->offset + offset;
51
+ MemoryRegionSection *section;
52
+ hwaddr mr_offset;
53
+ MemoryRegion *mr;
54
+ MemTxAttrs attrs;
55
56
tcg_debug_assert(size > 0 && size <= 8);
57
+
58
+ attrs = full->attrs;
59
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
60
+ mr = section->mr;
61
+
62
do {
63
+ MemOp this_mop;
64
+ unsigned this_size;
65
+ uint64_t val;
66
+ MemTxResult r;
67
+
68
/* Read aligned pieces up to 8 bytes. */
69
- switch ((size | (int)addr) & 7) {
70
- case 1:
71
- case 3:
72
- case 5:
73
- case 7:
74
- t = io_readx(env, full, mmu_idx, addr, ra, type, MO_UB);
75
- ret_be = (ret_be << 8) | t;
76
- size -= 1;
77
- addr += 1;
78
- break;
79
- case 2:
80
- case 6:
81
- t = io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUW);
82
- ret_be = (ret_be << 16) | t;
83
- size -= 2;
84
- addr += 2;
85
- break;
86
- case 4:
87
- t = io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUL);
88
- ret_be = (ret_be << 32) | t;
89
- size -= 4;
90
- addr += 4;
91
- break;
92
- case 0:
93
- return io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUQ);
94
- default:
95
- qemu_build_not_reached();
96
+ this_mop = ctz32(size | (int)addr | 8);
97
+ this_size = 1 << this_mop;
98
+ this_mop |= MO_BE;
99
+
100
+ r = memory_region_dispatch_read(mr, mr_offset, &val, this_mop, attrs);
101
+ if (unlikely(r != MEMTX_OK)) {
102
+ io_failed(env, full, addr, this_size, type, mmu_idx, r, ra);
103
}
104
+ if (this_size == 8) {
105
+ return val;
106
+ }
107
+
108
+ ret_be = (ret_be << (this_size * 8)) | val;
109
+ addr += this_size;
110
+ mr_offset += this_size;
111
+ size -= this_size;
112
} while (size);
113
+
114
return ret_be;
55
}
115
}
56
116
57
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
58
+{
59
+ ram_addr_t ram_addr;
60
+
61
+ ram_addr = qemu_ram_addr_from_host(ptr);
62
+ if (ram_addr == RAM_ADDR_INVALID) {
63
+ error_report("Bad ram pointer %p", ptr);
64
+ abort();
65
+ }
66
+ return ram_addr;
67
+}
68
+
69
static MemTxResult flatview_read(FlatView *fv, hwaddr addr,
70
MemTxAttrs attrs, void *buf, hwaddr len);
71
static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
72
--
117
--
73
2.34.1
118
2.34.1
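
A standalone demo of the piece-splitting rule in the read loop above; __builtin_ctz stands in for QEMU's ctz32, and the addresses are made up. ctz32(size | addr | 8) gives log2 of the largest naturally aligned piece, capped at 8 bytes, that fits both the current address and the remaining length:

#include <stdio.h>

static void split_access(unsigned addr, int size)
{
    while (size > 0) {
        int mop = __builtin_ctz(size | addr | 8);
        int piece = 1 << mop;

        printf("%d-byte access at 0x%x\n", piece, addr);
        addr += piece;
        size -= piece;
    }
}

int main(void)
{
    split_access(0x1003, 5);   /* 1 byte @ 0x1003, then 4 bytes @ 0x1004 */
    split_access(0x2000, 16);  /* two aligned 8-byte accesses */
    return 0;
}
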
1
Simplify the implementation of get_page_addr_code_hostp
1
Avoid multiple calls to io_prepare for unaligned accesses.
2
by reusing the existing probe_access infrastructure.
2
One call to do_st_mmio_leN will never cross pages.
3
3
4
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
5
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
4
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
5
---
8
accel/tcg/cputlb.c | 76 ++++++++++++++++------------------------------
6
accel/tcg/cputlb.c | 82 +++++++++++++++++-----------------------------
9
1 file changed, 26 insertions(+), 50 deletions(-)
7
1 file changed, 30 insertions(+), 52 deletions(-)
10
8
11
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
9
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
12
index XXXXXXX..XXXXXXX 100644
10
index XXXXXXX..XXXXXXX 100644
13
--- a/accel/tcg/cputlb.c
11
--- a/accel/tcg/cputlb.c
14
+++ b/accel/tcg/cputlb.c
12
+++ b/accel/tcg/cputlb.c
15
@@ -XXX,XX +XXX,XX @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
13
@@ -XXX,XX +XXX,XX @@ static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
16
victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
14
}
17
(ADDR) & TARGET_PAGE_MASK)
15
}
18
16
19
-/*
17
-static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
20
- * Return a ram_addr_t for the virtual address for execution.
18
- int mmu_idx, uint64_t val, vaddr addr,
21
- *
19
- uintptr_t retaddr, MemOp op)
22
- * Return -1 if we can't translate and execute from an entire page
23
- * of RAM. This will force us to execute by loading and translating
24
- * one insn at a time, without caching.
25
- *
26
- * NOTE: This function will trigger an exception if the page is
27
- * not executable.
28
- */
29
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
30
- void **hostp)
31
-{
20
-{
32
- uintptr_t mmu_idx = cpu_mmu_index(env, true);
21
- MemoryRegionSection *section;
33
- uintptr_t index = tlb_index(env, mmu_idx, addr);
22
- hwaddr mr_offset;
34
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
23
- MemoryRegion *mr;
35
- void *p;
24
- MemTxResult r;
36
-
25
-
37
- if (unlikely(!tlb_hit(entry->addr_code, addr))) {
26
- section = io_prepare(&mr_offset, env, full->xlat_section,
38
- if (!VICTIM_TLB_HIT(addr_code, addr)) {
27
- full->attrs, addr, retaddr);
39
- tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
28
- mr = section->mr;
40
- index = tlb_index(env, mmu_idx, addr);
41
- entry = tlb_entry(env, mmu_idx, addr);
42
-
29
-
43
- if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
30
- {
44
- /*
31
- QEMU_IOTHREAD_LOCK_GUARD();
45
- * The MMU protection covers a smaller range than a target
32
- r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
46
- * page, so we must redo the MMU check for every insn.
47
- */
48
- return -1;
49
- }
50
- }
51
- assert(tlb_hit(entry->addr_code, addr));
52
- }
33
- }
53
-
34
-
54
- if (unlikely(entry->addr_code & TLB_MMIO)) {
35
- if (r != MEMTX_OK) {
55
- /* The region is not backed by RAM. */
36
- io_failed(env, full, addr, memop_size(op), MMU_DATA_STORE, mmu_idx,
56
- if (hostp) {
37
- r, retaddr);
57
- *hostp = NULL;
58
- }
59
- return -1;
60
- }
38
- }
61
-
62
- p = (void *)((uintptr_t)addr + entry->addend);
63
- if (hostp) {
64
- *hostp = p;
65
- }
66
- return qemu_ram_addr_from_host_nofail(p);
67
-}
39
-}
68
-
40
-
69
static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
41
/* Return true if ADDR is present in the victim tlb, and has been copied
70
CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
42
back to the main tlb. */
43
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
44
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
45
uint64_t val_le, vaddr addr, int size,
46
int mmu_idx, uintptr_t ra)
71
{
47
{
72
@@ -XXX,XX +XXX,XX @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
48
+ MemoryRegionSection *section;
73
return flags ? NULL : host;
49
+ hwaddr mr_offset;
74
}
50
+ MemoryRegion *mr;
75
51
+ MemTxAttrs attrs;
76
+/*
77
+ * Return a ram_addr_t for the virtual address for execution.
78
+ *
79
+ * Return -1 if we can't translate and execute from an entire page
80
+ * of RAM. This will force us to execute by loading and translating
81
+ * one insn at a time, without caching.
82
+ *
83
+ * NOTE: This function will trigger an exception if the page is
84
+ * not executable.
85
+ */
86
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
87
+ void **hostp)
88
+{
89
+ void *p;
90
+
52
+
91
+ (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
53
tcg_debug_assert(size > 0 && size <= 8);
92
+ cpu_mmu_index(env, true), false, &p, 0);
54
93
+ if (p == NULL) {
55
+ attrs = full->attrs;
94
+ return -1;
56
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
95
+ }
57
+ mr = section->mr;
96
+ if (hostp) {
97
+ *hostp = p;
98
+ }
99
+ return qemu_ram_addr_from_host_nofail(p);
100
+}
101
+
58
+
102
#ifdef CONFIG_PLUGIN
59
do {
103
/*
60
+ MemOp this_mop;
104
* Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
61
+ unsigned this_size;
62
+ MemTxResult r;
63
+
64
/* Store aligned pieces up to 8 bytes. */
65
- switch ((size | (int)addr) & 7) {
66
- case 1:
67
- case 3:
68
- case 5:
69
- case 7:
70
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_UB);
71
- val_le >>= 8;
72
- size -= 1;
73
- addr += 1;
74
- break;
75
- case 2:
76
- case 6:
77
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUW);
78
- val_le >>= 16;
79
- size -= 2;
80
- addr += 2;
81
- break;
82
- case 4:
83
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUL);
84
- val_le >>= 32;
85
- size -= 4;
86
- addr += 4;
87
- break;
88
- case 0:
89
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUQ);
90
- return 0;
91
- default:
92
- qemu_build_not_reached();
93
+ this_mop = ctz32(size | (int)addr | 8);
94
+ this_size = 1 << this_mop;
95
+ this_mop |= MO_LE;
96
+
97
+ r = memory_region_dispatch_write(mr, mr_offset, val_le,
98
+ this_mop, attrs);
99
+ if (unlikely(r != MEMTX_OK)) {
100
+ io_failed(env, full, addr, this_size, MMU_DATA_STORE,
101
+ mmu_idx, r, ra);
102
}
103
+ if (this_size == 8) {
104
+ return 0;
105
+ }
106
+
107
+ val_le >>= this_size * 8;
108
+ addr += this_size;
109
+ mr_offset += this_size;
110
+ size -= this_size;
111
} while (size);
112
113
return val_le;
105
--
114
--
106
2.34.1
115
2.34.1
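
A small check (made-up value) of the little-endian store loop above: the low piece of val_le is written out first, then shifted away so the next aligned piece sits in the low bits:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t val_le = 0x123456;          /* 3-byte store, unaligned start */

    assert((val_le & 0xff) == 0x56);     /* 1-byte piece goes out first */
    val_le >>= 8;                        /* i.e. val_le >>= this_size * 8 */
    assert((val_le & 0xffff) == 0x1234); /* then the 2-byte piece */
    return 0;
}
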
1
From: Ilya Leoshkevich <iii@linux.ibm.com>
1
Split out int_ld_mmio_beN, to be used by both do_ld_mmio_beN
2
and do_ld16_mmio_beN. Move the locks down into the two
3
functions, since each one now covers all accesses to one page.
2
4
3
Introduce a function that checks whether a given address is on the same
4
page as where disassembly started. Having it improves readability of
5
the following patches.
6
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
9
Message-Id: <20220811095534.241224-3-iii@linux.ibm.com>
10
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
[rth: Make the DisasContextBase parameter const.]
12
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
---
6
---
14
include/exec/translator.h | 10 ++++++++++
7
accel/tcg/cputlb.c | 91 ++++++++++++++++++++++++++++++----------------
15
1 file changed, 10 insertions(+)
8
1 file changed, 59 insertions(+), 32 deletions(-)
16
9
17
diff --git a/include/exec/translator.h b/include/exec/translator.h
10
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
18
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
19
--- a/include/exec/translator.h
12
--- a/accel/tcg/cputlb.c
20
+++ b/include/exec/translator.h
13
+++ b/accel/tcg/cputlb.c
21
@@ -XXX,XX +XXX,XX @@ FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
14
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
22
15
* Load @size bytes from @addr, which is memory-mapped i/o.
23
#undef GEN_TRANSLATOR_LD
16
* The bytes are concatenated in big-endian order with @ret_be.
24
17
*/
25
+/*
18
-static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
26
+ * Return whether addr is on the same page as where disassembly started.
19
- uint64_t ret_be, vaddr addr, int size,
27
+ * Translators can use this to enforce the rule that only single-insn
20
- int mmu_idx, MMUAccessType type, uintptr_t ra)
28
+ * translation blocks are allowed to cross page boundaries.
21
+static uint64_t int_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
29
+ */
22
+ uint64_t ret_be, vaddr addr, int size,
30
+static inline bool is_same_page(const DisasContextBase *db, target_ulong addr)
23
+ int mmu_idx, MMUAccessType type, uintptr_t ra,
24
+ MemoryRegion *mr, hwaddr mr_offset)
25
{
26
- MemoryRegionSection *section;
27
- hwaddr mr_offset;
28
- MemoryRegion *mr;
29
- MemTxAttrs attrs;
30
-
31
- tcg_debug_assert(size > 0 && size <= 8);
32
-
33
- attrs = full->attrs;
34
- section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
35
- mr = section->mr;
36
-
37
do {
38
MemOp this_mop;
39
unsigned this_size;
40
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
41
this_size = 1 << this_mop;
42
this_mop |= MO_BE;
43
44
- r = memory_region_dispatch_read(mr, mr_offset, &val, this_mop, attrs);
45
+ r = memory_region_dispatch_read(mr, mr_offset, &val,
46
+ this_mop, full->attrs);
47
if (unlikely(r != MEMTX_OK)) {
48
io_failed(env, full, addr, this_size, type, mmu_idx, r, ra);
49
}
50
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
51
return ret_be;
52
}
53
54
+static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
55
+ uint64_t ret_be, vaddr addr, int size,
56
+ int mmu_idx, MMUAccessType type, uintptr_t ra)
31
+{
57
+{
32
+ return ((addr ^ db->pc_first) & TARGET_PAGE_MASK) == 0;
58
+ MemoryRegionSection *section;
59
+ MemoryRegion *mr;
60
+ hwaddr mr_offset;
61
+ MemTxAttrs attrs;
62
+ uint64_t ret;
63
+
64
+ tcg_debug_assert(size > 0 && size <= 8);
65
+
66
+ attrs = full->attrs;
67
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
68
+ mr = section->mr;
69
+
70
+ qemu_mutex_lock_iothread();
71
+ ret = int_ld_mmio_beN(env, full, ret_be, addr, size, mmu_idx,
72
+ type, ra, mr, mr_offset);
73
+ qemu_mutex_unlock_iothread();
74
+
75
+ return ret;
33
+}
76
+}
34
+
77
+
35
#endif /* EXEC__TRANSLATOR_H */
78
+static Int128 do_ld16_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
79
+ uint64_t ret_be, vaddr addr, int size,
80
+ int mmu_idx, uintptr_t ra)
81
+{
82
+ MemoryRegionSection *section;
83
+ MemoryRegion *mr;
84
+ hwaddr mr_offset;
85
+ MemTxAttrs attrs;
86
+ uint64_t a, b;
87
+
88
+ tcg_debug_assert(size > 8 && size <= 16);
89
+
90
+ attrs = full->attrs;
91
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
92
+ mr = section->mr;
93
+
94
+ qemu_mutex_lock_iothread();
95
+ a = int_ld_mmio_beN(env, full, ret_be, addr, size - 8, mmu_idx,
96
+ MMU_DATA_LOAD, ra, mr, mr_offset);
97
+ b = int_ld_mmio_beN(env, full, ret_be, addr + size - 8, 8, mmu_idx,
98
+ MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
99
+ qemu_mutex_unlock_iothread();
100
+
101
+ return int128_make128(b, a);
102
+}
103
+
104
/**
105
* do_ld_bytes_beN
106
* @p: translation parameters
107
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p,
108
unsigned tmp, half_size;
109
110
if (unlikely(p->flags & TLB_MMIO)) {
111
- QEMU_IOTHREAD_LOCK_GUARD();
112
return do_ld_mmio_beN(env, p->full, ret_be, p->addr, p->size,
113
mmu_idx, type, ra);
114
}
115
@@ -XXX,XX +XXX,XX @@ static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p,
116
MemOp atom;
117
118
if (unlikely(p->flags & TLB_MMIO)) {
119
- QEMU_IOTHREAD_LOCK_GUARD();
120
- a = do_ld_mmio_beN(env, p->full, a, p->addr, size - 8,
121
- mmu_idx, MMU_DATA_LOAD, ra);
122
- b = do_ld_mmio_beN(env, p->full, 0, p->addr + 8, 8,
123
- mmu_idx, MMU_DATA_LOAD, ra);
124
- return int128_make128(b, a);
125
+ return do_ld16_mmio_beN(env, p->full, a, p->addr, size, mmu_idx, ra);
126
}
127
128
/*
129
@@ -XXX,XX +XXX,XX @@ static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
130
MMUAccessType type, uintptr_t ra)
131
{
132
if (unlikely(p->flags & TLB_MMIO)) {
133
- QEMU_IOTHREAD_LOCK_GUARD();
134
return do_ld_mmio_beN(env, p->full, 0, p->addr, 1, mmu_idx, type, ra);
135
} else {
136
return *(uint8_t *)p->haddr;
137
@@ -XXX,XX +XXX,XX @@ static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
138
uint16_t ret;
139
140
if (unlikely(p->flags & TLB_MMIO)) {
141
- QEMU_IOTHREAD_LOCK_GUARD();
142
ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 2, mmu_idx, type, ra);
143
if ((memop & MO_BSWAP) == MO_LE) {
144
ret = bswap16(ret);
145
@@ -XXX,XX +XXX,XX @@ static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
146
uint32_t ret;
147
148
if (unlikely(p->flags & TLB_MMIO)) {
149
- QEMU_IOTHREAD_LOCK_GUARD();
150
ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 4, mmu_idx, type, ra);
151
if ((memop & MO_BSWAP) == MO_LE) {
152
ret = bswap32(ret);
153
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
154
uint64_t ret;
155
156
if (unlikely(p->flags & TLB_MMIO)) {
157
- QEMU_IOTHREAD_LOCK_GUARD();
158
ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 8, mmu_idx, type, ra);
159
if ((memop & MO_BSWAP) == MO_LE) {
160
ret = bswap64(ret);
161
@@ -XXX,XX +XXX,XX @@ static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
162
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
163
if (likely(!crosspage)) {
164
if (unlikely(l.page[0].flags & TLB_MMIO)) {
165
- QEMU_IOTHREAD_LOCK_GUARD();
166
- a = do_ld_mmio_beN(env, l.page[0].full, 0, addr, 8,
167
- l.mmu_idx, MMU_DATA_LOAD, ra);
168
- b = do_ld_mmio_beN(env, l.page[0].full, 0, addr + 8, 8,
169
- l.mmu_idx, MMU_DATA_LOAD, ra);
170
- ret = int128_make128(b, a);
171
+ ret = do_ld16_mmio_beN(env, l.page[0].full, 0, addr, 16,
172
+ l.mmu_idx, ra);
173
if ((l.memop & MO_BSWAP) == MO_LE) {
174
ret = bswap128(ret);
175
}
36
--
176
--
37
2.34.1
177
2.34.1
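
A self-contained illustration (hypothetical 12-bit page shift) of the is_same_page test introduced above: two addresses share a page exactly when their page-number bits agree, which costs one XOR and one mask:

#include <assert.h>
#include <stdint.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK ((uint64_t)-1 << TARGET_PAGE_BITS)

static int same_page(uint64_t a, uint64_t b)
{
    return ((a ^ b) & TARGET_PAGE_MASK) == 0;
}

int main(void)
{
    assert(same_page(0x1000, 0x1fff));   /* both inside page 0x1 */
    assert(!same_page(0x1fff, 0x2000));  /* last byte vs next page */
    return 0;
}
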
1
Cache the translation from guest to host address, so we may
1
Split out int_st_mmio_leN, to be used by both do_st_mmio_leN
2
use direct loads when we hit on the primary translation page.
2
and do_st16_mmio_leN. Move the locks down into the two
3
functions, since each one now covers all accesses to one page.
3
4
4
Look up the second translation page only once, during translation.
5
This obviates another lookup of the second page within tb_gen_code
6
after translation.
7
8
Fixes a bug in that plugin_insn_append should be passed the bytes
9
in the original memory order, not bswapped by pieces.
10
11
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
12
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
5
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
6
---
15
include/exec/translator.h | 63 +++++++++++--------
7
accel/tcg/cputlb.c | 88 ++++++++++++++++++++++++++++++----------------
16
accel/tcg/translate-all.c | 23 +++----
8
1 file changed, 58 insertions(+), 30 deletions(-)
17
accel/tcg/translator.c | 126 +++++++++++++++++++++++++++++---------
18
3 files changed, 141 insertions(+), 71 deletions(-)
19
9
20
diff --git a/include/exec/translator.h b/include/exec/translator.h
10
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
21
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
22
--- a/include/exec/translator.h
12
--- a/accel/tcg/cputlb.c
23
+++ b/include/exec/translator.h
13
+++ b/accel/tcg/cputlb.c
24
@@ -XXX,XX +XXX,XX @@ typedef enum DisasJumpType {
14
@@ -XXX,XX +XXX,XX @@ Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
25
* Architecture-agnostic disassembly context.
15
* The bytes to store are extracted in little-endian order from @val_le;
16
* return the bytes of @val_le beyond @p->size that have not been stored.
26
*/
17
*/
27
typedef struct DisasContextBase {
18
-static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
28
- const TranslationBlock *tb;
19
- uint64_t val_le, vaddr addr, int size,
29
+ TranslationBlock *tb;
20
- int mmu_idx, uintptr_t ra)
30
target_ulong pc_first;
21
+static uint64_t int_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
31
target_ulong pc_next;
22
+ uint64_t val_le, vaddr addr, int size,
32
DisasJumpType is_jmp;
23
+ int mmu_idx, uintptr_t ra,
33
int num_insns;
24
+ MemoryRegion *mr, hwaddr mr_offset)
34
int max_insns;
25
{
35
bool singlestep_enabled;
26
- MemoryRegionSection *section;
36
-#ifdef CONFIG_USER_ONLY
27
- hwaddr mr_offset;
37
- /*
28
- MemoryRegion *mr;
38
- * Guest address of the last byte of the last protected page.
29
- MemTxAttrs attrs;
39
- *
30
-
40
- * Pages containing the translated instructions are made non-writable in
31
- tcg_debug_assert(size > 0 && size <= 8);
41
- * order to achieve consistency in case another thread is modifying the
32
-
42
- * code while translate_insn() fetches the instruction bytes piecemeal.
33
- attrs = full->attrs;
43
- * Such writer threads are blocked on mmap_lock() in page_unprotect().
34
- section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
44
- */
35
- mr = section->mr;
45
- target_ulong page_protect_end;
36
-
46
-#endif
37
do {
47
+ void *host_addr[2];
38
MemOp this_mop;
48
} DisasContextBase;
39
unsigned this_size;
49
40
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
50
/**
41
this_mop |= MO_LE;
51
@@ -XXX,XX +XXX,XX @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest);
42
52
* the relevant information at translation time.
43
r = memory_region_dispatch_write(mr, mr_offset, val_le,
53
*/
44
- this_mop, attrs);
54
45
+ this_mop, full->attrs);
55
-#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
46
if (unlikely(r != MEMTX_OK)) {
56
- type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
47
io_failed(env, full, addr, this_size, MMU_DATA_STORE,
57
- abi_ptr pc, bool do_swap); \
48
mmu_idx, r, ra);
58
- static inline type fullname(CPUArchState *env, \
49
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
59
- DisasContextBase *dcbase, abi_ptr pc) \
50
return val_le;
60
- { \
51
}
61
- return fullname ## _swap(env, dcbase, pc, false); \
52
62
+uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
53
+static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
63
+uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
54
+ uint64_t val_le, vaddr addr, int size,
64
+uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
55
+ int mmu_idx, uintptr_t ra)
65
+uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc);
56
+{
57
+ MemoryRegionSection *section;
58
+ hwaddr mr_offset;
59
+ MemoryRegion *mr;
60
+ MemTxAttrs attrs;
61
+ uint64_t ret;
66
+
62
+
67
+static inline uint16_t
63
+ tcg_debug_assert(size > 0 && size <= 8);
68
+translator_lduw_swap(CPUArchState *env, DisasContextBase *db,
69
+ abi_ptr pc, bool do_swap)
70
+{
71
+ uint16_t ret = translator_lduw(env, db, pc);
72
+ if (do_swap) {
73
+ ret = bswap16(ret);
74
}
75
+ return ret;
76
+}
77
78
-#define FOR_EACH_TRANSLATOR_LD(F) \
79
- F(translator_ldub, uint8_t, cpu_ldub_code, /* no swap */) \
80
- F(translator_lduw, uint16_t, cpu_lduw_code, bswap16) \
81
- F(translator_ldl, uint32_t, cpu_ldl_code, bswap32) \
82
- F(translator_ldq, uint64_t, cpu_ldq_code, bswap64)
83
+static inline uint32_t
84
+translator_ldl_swap(CPUArchState *env, DisasContextBase *db,
85
+ abi_ptr pc, bool do_swap)
86
+{
87
+ uint32_t ret = translator_ldl(env, db, pc);
88
+ if (do_swap) {
89
+ ret = bswap32(ret);
90
+ }
91
+ return ret;
92
+}
93
94
-FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
95
-
96
-#undef GEN_TRANSLATOR_LD
97
+static inline uint64_t
98
+translator_ldq_swap(CPUArchState *env, DisasContextBase *db,
99
+ abi_ptr pc, bool do_swap)
100
+{
101
+ uint64_t ret = translator_ldq_swap(env, db, pc, false);
102
+ if (do_swap) {
103
+ ret = bswap64(ret);
104
+ }
105
+ return ret;
106
+}
107
108
/*
109
* Return whether addr is on the same page as where disassembly started.
110
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
111
index XXXXXXX..XXXXXXX 100644
112
--- a/accel/tcg/translate-all.c
113
+++ b/accel/tcg/translate-all.c
114
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
115
{
116
CPUArchState *env = cpu->env_ptr;
117
TranslationBlock *tb, *existing_tb;
118
- tb_page_addr_t phys_pc, phys_page2;
119
- target_ulong virt_page2;
120
+ tb_page_addr_t phys_pc;
121
tcg_insn_unit *gen_code_buf;
122
int gen_code_size, search_size, max_insns;
123
#ifdef CONFIG_PROFILER
124
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
125
tb->flags = flags;
126
tb->cflags = cflags;
127
tb->trace_vcpu_dstate = *cpu->trace_dstate;
128
+ tb->page_addr[0] = phys_pc;
129
+ tb->page_addr[1] = -1;
130
tcg_ctx->tb_cflags = cflags;
131
tb_overflow:
132
133
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
134
}
135
136
/*
137
- * If the TB is not associated with a physical RAM page then
138
- * it must be a temporary one-insn TB, and we have nothing to do
139
- * except fill in the page_addr[] fields. Return early before
140
- * attempting to link to other TBs or add to the lookup table.
141
+ * If the TB is not associated with a physical RAM page then it must be
142
+ * a temporary one-insn TB, and we have nothing left to do. Return early
143
+ * before attempting to link to other TBs or add to the lookup table.
144
*/
145
- if (phys_pc == -1) {
146
- tb->page_addr[0] = tb->page_addr[1] = -1;
147
+ if (tb->page_addr[0] == -1) {
148
return tb;
149
}
150
151
@@ -XXX,XX +XXX,XX @@ TranslationBlock *tb_gen_code(CPUState *cpu,
152
*/
153
tcg_tb_insert(tb);
154
155
- /* check next page if needed */
156
- virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
157
- phys_page2 = -1;
158
- if ((pc & TARGET_PAGE_MASK) != virt_page2) {
159
- phys_page2 = get_page_addr_code(env, virt_page2);
160
- }
161
/*
162
* No explicit memory barrier is required -- tb_link_page() makes the
163
* TB visible in a consistent state.
164
*/
165
- existing_tb = tb_link_page(tb, phys_pc, phys_page2);
166
+ existing_tb = tb_link_page(tb, tb->page_addr[0], tb->page_addr[1]);
167
/* if the TB already exists, discard what we just translated */
168
if (unlikely(existing_tb != tb)) {
169
uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
170
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
171
index XXXXXXX..XXXXXXX 100644
172
--- a/accel/tcg/translator.c
173
+++ b/accel/tcg/translator.c
174
@@ -XXX,XX +XXX,XX @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
175
return ((db->pc_first ^ dest) & TARGET_PAGE_MASK) == 0;
176
}
177
178
-static inline void translator_page_protect(DisasContextBase *dcbase,
179
- target_ulong pc)
180
-{
181
-#ifdef CONFIG_USER_ONLY
182
- dcbase->page_protect_end = pc | ~TARGET_PAGE_MASK;
183
- page_protect(pc);
184
-#endif
185
-}
186
-
187
void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
188
target_ulong pc, void *host_pc,
189
const TranslatorOps *ops, DisasContextBase *db)
190
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
191
db->num_insns = 0;
192
db->max_insns = max_insns;
193
db->singlestep_enabled = cflags & CF_SINGLE_STEP;
194
- translator_page_protect(db, db->pc_next);
195
+ db->host_addr[0] = host_pc;
196
+ db->host_addr[1] = NULL;
197
+
64
+
198
+#ifdef CONFIG_USER_ONLY
65
+ attrs = full->attrs;
199
+ page_protect(pc);
66
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
200
+#endif
67
+ mr = section->mr;
201
202
ops->init_disas_context(db, cpu);
203
tcg_debug_assert(db->is_jmp == DISAS_NEXT); /* no early exit */
204
@@ -XXX,XX +XXX,XX @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int max_insns,
205
#endif
206
}
207
208
-static inline void translator_maybe_page_protect(DisasContextBase *dcbase,
209
- target_ulong pc, size_t len)
210
+static void *translator_access(CPUArchState *env, DisasContextBase *db,
211
+ target_ulong pc, size_t len)
212
{
213
-#ifdef CONFIG_USER_ONLY
214
- target_ulong end = pc + len - 1;
215
+ void *host;
216
+ target_ulong base, end;
217
+ TranslationBlock *tb;
218
219
- if (end > dcbase->page_protect_end) {
220
- translator_page_protect(dcbase, end);
221
+ tb = db->tb;
222
+
68
+
223
+ /* Use slow path if first page is MMIO. */
69
+ qemu_mutex_lock_iothread();
224
+ if (unlikely(tb->page_addr[0] == -1)) {
70
+ ret = int_st_mmio_leN(env, full, val_le, addr, size, mmu_idx,
225
+ return NULL;
71
+ ra, mr, mr_offset);
226
}
72
+ qemu_mutex_unlock_iothread();
227
+
73
+
228
+ end = pc + len - 1;
229
+ if (likely(is_same_page(db, end))) {
230
+ host = db->host_addr[0];
231
+ base = db->pc_first;
232
+ } else {
233
+ host = db->host_addr[1];
234
+ base = TARGET_PAGE_ALIGN(db->pc_first);
235
+ if (host == NULL) {
236
+ tb->page_addr[1] =
237
+ get_page_addr_code_hostp(env, base, &db->host_addr[1]);
238
+#ifdef CONFIG_USER_ONLY
239
+ page_protect(end);
240
#endif
241
+ /* We cannot handle MMIO as second page. */
242
+ assert(tb->page_addr[1] != -1);
243
+ host = db->host_addr[1];
244
+ }
245
+
246
+ /* Use slow path when crossing pages. */
247
+ if (is_same_page(db, pc)) {
248
+ return NULL;
249
+ }
250
+ }
251
+
252
+ tcg_debug_assert(pc >= base);
253
+ return host + (pc - base);
254
}
255
256
-#define GEN_TRANSLATOR_LD(fullname, type, load_fn, swap_fn) \
257
- type fullname ## _swap(CPUArchState *env, DisasContextBase *dcbase, \
258
- abi_ptr pc, bool do_swap) \
259
- { \
260
- translator_maybe_page_protect(dcbase, pc, sizeof(type)); \
261
- type ret = load_fn(env, pc); \
262
- if (do_swap) { \
263
- ret = swap_fn(ret); \
264
- } \
265
- plugin_insn_append(pc, &ret, sizeof(ret)); \
266
- return ret; \
267
+uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
268
+{
269
+ uint8_t ret;
270
+ void *p = translator_access(env, db, pc, sizeof(ret));
271
+
272
+ if (p) {
273
+ plugin_insn_append(pc, p, sizeof(ret));
274
+ return ldub_p(p);
275
}
276
+ ret = cpu_ldub_code(env, pc);
277
+ plugin_insn_append(pc, &ret, sizeof(ret));
278
+ return ret;
279
+}
280
281
-FOR_EACH_TRANSLATOR_LD(GEN_TRANSLATOR_LD)
282
+uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
283
+{
284
+ uint16_t ret, plug;
285
+ void *p = translator_access(env, db, pc, sizeof(ret));
286
287
-#undef GEN_TRANSLATOR_LD
288
+ if (p) {
289
+ plugin_insn_append(pc, p, sizeof(ret));
290
+ return lduw_p(p);
291
+ }
292
+ ret = cpu_lduw_code(env, pc);
293
+ plug = tswap16(ret);
294
+ plugin_insn_append(pc, &plug, sizeof(ret));
295
+ return ret;
74
+ return ret;
296
+}
75
+}
297
+
76
+
298
+uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
77
+static uint64_t do_st16_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
78
+ Int128 val_le, vaddr addr, int size,
79
+ int mmu_idx, uintptr_t ra)
299
+{
80
+{
300
+ uint32_t ret, plug;
81
+ MemoryRegionSection *section;
301
+ void *p = translator_access(env, db, pc, sizeof(ret));
82
+ MemoryRegion *mr;
83
+ hwaddr mr_offset;
84
+ MemTxAttrs attrs;
85
+ uint64_t ret;
302
+
86
+
303
+ if (p) {
87
+ tcg_debug_assert(size > 8 && size <= 16);
304
+ plugin_insn_append(pc, p, sizeof(ret));
88
+
305
+ return ldl_p(p);
89
+ attrs = full->attrs;
306
+ }
90
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
307
+ ret = cpu_ldl_code(env, pc);
91
+ mr = section->mr;
308
+ plug = tswap32(ret);
92
+
309
+ plugin_insn_append(pc, &plug, sizeof(ret));
93
+ qemu_mutex_lock_iothread();
94
+ int_st_mmio_leN(env, full, int128_getlo(val_le), addr, 8,
95
+ mmu_idx, ra, mr, mr_offset);
96
+ ret = int_st_mmio_leN(env, full, int128_gethi(val_le), addr + 8,
97
+ size - 8, mmu_idx, ra, mr, mr_offset + 8);
98
+ qemu_mutex_unlock_iothread();
99
+
310
+ return ret;
100
+ return ret;
311
+    return ret;
+}
+
+static uint64_t do_st16_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+                                 Int128 val_le, vaddr addr, int size,
+                                 int mmu_idx, uintptr_t ra)
+{
+    MemoryRegionSection *section;
+    MemoryRegion *mr;
+    hwaddr mr_offset;
+    MemTxAttrs attrs;
+    uint64_t ret;
+
+    tcg_debug_assert(size > 8 && size <= 16);
+
+    attrs = full->attrs;
+    section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+    mr = section->mr;
+
+    qemu_mutex_lock_iothread();
+    int_st_mmio_leN(env, full, int128_getlo(val_le), addr, 8,
+                    mmu_idx, ra, mr, mr_offset);
+    ret = int_st_mmio_leN(env, full, int128_gethi(val_le), addr + 8,
+                          size - 8, mmu_idx, ra, mr, mr_offset + 8);
+    qemu_mutex_unlock_iothread();
+
+    return ret;
+}
+
 /*
  * Wrapper for the above.
  */
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
     unsigned tmp, half_size;
 
     if (unlikely(p->flags & TLB_MMIO)) {
-        QEMU_IOTHREAD_LOCK_GUARD();
         return do_st_mmio_leN(env, p->full, val_le, p->addr,
                               p->size, mmu_idx, ra);
     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
     MemOp atom;
 
     if (unlikely(p->flags & TLB_MMIO)) {
-        QEMU_IOTHREAD_LOCK_GUARD();
-        do_st_mmio_leN(env, p->full, int128_getlo(val_le),
-                       p->addr, 8, mmu_idx, ra);
-        return do_st_mmio_leN(env, p->full, int128_gethi(val_le),
-                              p->addr + 8, size - 8, mmu_idx, ra);
+        return do_st16_mmio_leN(env, p->full, val_le, p->addr,
+                                size, mmu_idx, ra);
     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
         return int128_gethi(val_le) >> ((size - 8) * 8);
     }
@@ -XXX,XX +XXX,XX @@ static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val,
                     int mmu_idx, uintptr_t ra)
 {
     if (unlikely(p->flags & TLB_MMIO)) {
-        QEMU_IOTHREAD_LOCK_GUARD();
         do_st_mmio_leN(env, p->full, val, p->addr, 1, mmu_idx, ra);
     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
         /* nothing */
@@ -XXX,XX +XXX,XX @@ static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val,
         if ((memop & MO_BSWAP) != MO_LE) {
             val = bswap16(val);
         }
-        QEMU_IOTHREAD_LOCK_GUARD();
         do_st_mmio_leN(env, p->full, val, p->addr, 2, mmu_idx, ra);
     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
         /* nothing */
@@ -XXX,XX +XXX,XX @@ static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val,
         if ((memop & MO_BSWAP) != MO_LE) {
             val = bswap32(val);
         }
-        QEMU_IOTHREAD_LOCK_GUARD();
         do_st_mmio_leN(env, p->full, val, p->addr, 4, mmu_idx, ra);
     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
         /* nothing */
@@ -XXX,XX +XXX,XX @@ static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
         if ((memop & MO_BSWAP) != MO_LE) {
             val = bswap64(val);
         }
-        QEMU_IOTHREAD_LOCK_GUARD();
         do_st_mmio_leN(env, p->full, val, p->addr, 8, mmu_idx, ra);
     } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
         /* nothing */
@@ -XXX,XX +XXX,XX @@ static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
         if ((l.memop & MO_BSWAP) != MO_LE) {
             val = bswap128(val);
         }
-        a = int128_getlo(val);
-        b = int128_gethi(val);
-        QEMU_IOTHREAD_LOCK_GUARD();
-        do_st_mmio_leN(env, l.page[0].full, a, addr, 8, l.mmu_idx, ra);
-        do_st_mmio_leN(env, l.page[0].full, b, addr + 8, 8, l.mmu_idx, ra);
+        do_st16_mmio_leN(env, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
     } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
         /* nothing */
     } else {
--
2.34.1
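For reference, the byte split performed by do_st16_mmio_leN() above (a
sketch under the little-endian "leN" convention; the Int128 accessors are
QEMU's own from qemu/int128.h):

    #include "qemu/int128.h"

    static void split_16byte_store(Int128 val_le, uint64_t *lo, uint64_t *hi)
    {
        *lo = int128_getlo(val_le);   /* bytes 0-7, issued at addr     */
        *hi = int128_gethi(val_le);   /* bytes 8-15, issued at addr + 8 */
    }

Taking the iothread mutex once around both halves, rather than once per
do_st_mmio_leN() call, keeps the pair of 8-byte accesses adjacent under
the same lock.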
It was non-obvious to me why we can raise an exception in
the middle of a comparison function, but it works.
While nearby, use TARGET_PAGE_ALIGN instead of open-coding.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cpu-exec.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ static bool tb_lookup_cmp(const void *p, const void *d)
             tb_page_addr_t phys_page2;
             target_ulong virt_page2;
 
-            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+            /*
+             * We know that the first page matched, and an otherwise valid TB
+             * encountered an incomplete instruction at the end of that page,
+             * therefore we know that generating a new TB from the current PC
+             * must also require reading from the next page -- even if the
+             * second pages do not match, and therefore the resulting insn
+             * is different for the new TB. Therefore any exception raised
+             * here by the faulting lookup is not premature.
+             */
+            virt_page2 = TARGET_PAGE_ALIGN(desc->pc);
             phys_page2 = get_page_addr_code(desc->env, virt_page2);
             if (tb->page_addr[1] == phys_page2) {
                 return true;
--
2.34.1

From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>

We missed these functions when upstreaming the bfloat16 support.

Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Message-Id: <20230531065458.2082-1-zhiwei_liu@linux.alibaba.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/fpu/softfloat.h | 12 +++++++++
 fpu/softfloat.c         | 58 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 70 insertions(+)

diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -XXX,XX +XXX,XX @@ float32 bfloat16_to_float32(bfloat16, float_status *status);
 bfloat16 float64_to_bfloat16(float64 a, float_status *status);
 float64 bfloat16_to_float64(bfloat16 a, float_status *status);
 
+int8_t bfloat16_to_int8_scalbn(bfloat16, FloatRoundMode,
+                               int, float_status *status);
 int16_t bfloat16_to_int16_scalbn(bfloat16, FloatRoundMode,
                                  int, float_status *status);
 int32_t bfloat16_to_int32_scalbn(bfloat16, FloatRoundMode,
@@ -XXX,XX +XXX,XX @@ int32_t bfloat16_to_int32_scalbn(bfloat16, FloatRoundMode,
 int64_t bfloat16_to_int64_scalbn(bfloat16, FloatRoundMode,
                                  int, float_status *status);
 
+int8_t bfloat16_to_int8(bfloat16, float_status *status);
 int16_t bfloat16_to_int16(bfloat16, float_status *status);
 int32_t bfloat16_to_int32(bfloat16, float_status *status);
 int64_t bfloat16_to_int64(bfloat16, float_status *status);
 
+int8_t bfloat16_to_int8_round_to_zero(bfloat16, float_status *status);
 int16_t bfloat16_to_int16_round_to_zero(bfloat16, float_status *status);
 int32_t bfloat16_to_int32_round_to_zero(bfloat16, float_status *status);
 int64_t bfloat16_to_int64_round_to_zero(bfloat16, float_status *status);
 
+uint8_t bfloat16_to_uint8_scalbn(bfloat16 a, FloatRoundMode,
+                                 int, float_status *status);
 uint16_t bfloat16_to_uint16_scalbn(bfloat16 a, FloatRoundMode,
                                    int, float_status *status);
 uint32_t bfloat16_to_uint32_scalbn(bfloat16 a, FloatRoundMode,
@@ -XXX,XX +XXX,XX @@ uint32_t bfloat16_to_uint32_scalbn(bfloat16 a, FloatRoundMode,
 uint64_t bfloat16_to_uint64_scalbn(bfloat16 a, FloatRoundMode,
                                    int, float_status *status);
 
+uint8_t bfloat16_to_uint8(bfloat16 a, float_status *status);
 uint16_t bfloat16_to_uint16(bfloat16 a, float_status *status);
 uint32_t bfloat16_to_uint32(bfloat16 a, float_status *status);
 uint64_t bfloat16_to_uint64(bfloat16 a, float_status *status);
 
+uint8_t bfloat16_to_uint8_round_to_zero(bfloat16 a, float_status *status);
 uint16_t bfloat16_to_uint16_round_to_zero(bfloat16 a, float_status *status);
 uint32_t bfloat16_to_uint32_round_to_zero(bfloat16 a, float_status *status);
 uint64_t bfloat16_to_uint64_round_to_zero(bfloat16 a, float_status *status);
 
+bfloat16 int8_to_bfloat16_scalbn(int8_t a, int, float_status *status);
 bfloat16 int16_to_bfloat16_scalbn(int16_t a, int, float_status *status);
 bfloat16 int32_to_bfloat16_scalbn(int32_t a, int, float_status *status);
 bfloat16 int64_to_bfloat16_scalbn(int64_t a, int, float_status *status);
+bfloat16 uint8_to_bfloat16_scalbn(uint8_t a, int, float_status *status);
 bfloat16 uint16_to_bfloat16_scalbn(uint16_t a, int, float_status *status);
 bfloat16 uint32_to_bfloat16_scalbn(uint32_t a, int, float_status *status);
 bfloat16 uint64_to_bfloat16_scalbn(uint64_t a, int, float_status *status);
 
+bfloat16 int8_to_bfloat16(int8_t a, float_status *status);
 bfloat16 int16_to_bfloat16(int16_t a, float_status *status);
 bfloat16 int32_to_bfloat16(int32_t a, float_status *status);
 bfloat16 int64_to_bfloat16(int64_t a, float_status *status);
+bfloat16 uint8_to_bfloat16(uint8_t a, float_status *status);
 bfloat16 uint16_to_bfloat16(uint16_t a, float_status *status);
 bfloat16 uint32_to_bfloat16(uint32_t a, float_status *status);
 bfloat16 uint64_to_bfloat16(uint64_t a, float_status *status);
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -XXX,XX +XXX,XX @@ int64_t float64_to_int64_scalbn(float64 a, FloatRoundMode rmode, int scale,
     return parts_float_to_sint(&p, rmode, scale, INT64_MIN, INT64_MAX, s);
 }
 
+int8_t bfloat16_to_int8_scalbn(bfloat16 a, FloatRoundMode rmode, int scale,
+                               float_status *s)
+{
+    FloatParts64 p;
+
+    bfloat16_unpack_canonical(&p, a, s);
+    return parts_float_to_sint(&p, rmode, scale, INT8_MIN, INT8_MAX, s);
+}
+
 int16_t bfloat16_to_int16_scalbn(bfloat16 a, FloatRoundMode rmode, int scale,
                                  float_status *s)
 {
@@ -XXX,XX +XXX,XX @@ int64_t floatx80_to_int64_round_to_zero(floatx80 a, float_status *s)
     return floatx80_to_int64_scalbn(a, float_round_to_zero, 0, s);
 }
 
+int8_t bfloat16_to_int8(bfloat16 a, float_status *s)
+{
+    return bfloat16_to_int8_scalbn(a, s->float_rounding_mode, 0, s);
+}
+
 int16_t bfloat16_to_int16(bfloat16 a, float_status *s)
 {
     return bfloat16_to_int16_scalbn(a, s->float_rounding_mode, 0, s);
@@ -XXX,XX +XXX,XX @@ int64_t bfloat16_to_int64(bfloat16 a, float_status *s)
     return bfloat16_to_int64_scalbn(a, s->float_rounding_mode, 0, s);
 }
 
+int8_t bfloat16_to_int8_round_to_zero(bfloat16 a, float_status *s)
+{
+    return bfloat16_to_int8_scalbn(a, float_round_to_zero, 0, s);
+}
+
 int16_t bfloat16_to_int16_round_to_zero(bfloat16 a, float_status *s)
 {
     return bfloat16_to_int16_scalbn(a, float_round_to_zero, 0, s);
@@ -XXX,XX +XXX,XX @@ uint64_t float64_to_uint64_scalbn(float64 a, FloatRoundMode rmode, int scale,
     return parts_float_to_uint(&p, rmode, scale, UINT64_MAX, s);
 }
 
+uint8_t bfloat16_to_uint8_scalbn(bfloat16 a, FloatRoundMode rmode,
+                                 int scale, float_status *s)
+{
+    FloatParts64 p;
+
+    bfloat16_unpack_canonical(&p, a, s);
+    return parts_float_to_uint(&p, rmode, scale, UINT8_MAX, s);
+}
+
 uint16_t bfloat16_to_uint16_scalbn(bfloat16 a, FloatRoundMode rmode,
                                    int scale, float_status *s)
 {
@@ -XXX,XX +XXX,XX @@ Int128 float128_to_uint128_round_to_zero(float128 a, float_status *s)
     return float128_to_uint128_scalbn(a, float_round_to_zero, 0, s);
 }
 
+uint8_t bfloat16_to_uint8(bfloat16 a, float_status *s)
+{
+    return bfloat16_to_uint8_scalbn(a, s->float_rounding_mode, 0, s);
+}
+
 uint16_t bfloat16_to_uint16(bfloat16 a, float_status *s)
 {
     return bfloat16_to_uint16_scalbn(a, s->float_rounding_mode, 0, s);
@@ -XXX,XX +XXX,XX @@ uint64_t bfloat16_to_uint64(bfloat16 a, float_status *s)
     return bfloat16_to_uint64_scalbn(a, s->float_rounding_mode, 0, s);
 }
 
+uint8_t bfloat16_to_uint8_round_to_zero(bfloat16 a, float_status *s)
+{
+    return bfloat16_to_uint8_scalbn(a, float_round_to_zero, 0, s);
+}
+
 uint16_t bfloat16_to_uint16_round_to_zero(bfloat16 a, float_status *s)
 {
     return bfloat16_to_uint16_scalbn(a, float_round_to_zero, 0, s);
@@ -XXX,XX +XXX,XX @@ bfloat16 int16_to_bfloat16_scalbn(int16_t a, int scale, float_status *status)
     return int64_to_bfloat16_scalbn(a, scale, status);
 }
 
+bfloat16 int8_to_bfloat16_scalbn(int8_t a, int scale, float_status *status)
+{
+    return int64_to_bfloat16_scalbn(a, scale, status);
+}
+
 bfloat16 int64_to_bfloat16(int64_t a, float_status *status)
 {
     return int64_to_bfloat16_scalbn(a, 0, status);
@@ -XXX,XX +XXX,XX @@ bfloat16 int16_to_bfloat16(int16_t a, float_status *status)
     return int64_to_bfloat16_scalbn(a, 0, status);
 }
 
+bfloat16 int8_to_bfloat16(int8_t a, float_status *status)
+{
+    return int64_to_bfloat16_scalbn(a, 0, status);
+}
+
 float128 int128_to_float128(Int128 a, float_status *status)
 {
     FloatParts128 p = { };
@@ -XXX,XX +XXX,XX @@ bfloat16 uint16_to_bfloat16_scalbn(uint16_t a, int scale, float_status *status)
     return uint64_to_bfloat16_scalbn(a, scale, status);
 }
 
+bfloat16 uint8_to_bfloat16_scalbn(uint8_t a, int scale, float_status *status)
+{
+    return uint64_to_bfloat16_scalbn(a, scale, status);
+}
+
 bfloat16 uint64_to_bfloat16(uint64_t a, float_status *status)
 {
     return uint64_to_bfloat16_scalbn(a, 0, status);
@@ -XXX,XX +XXX,XX @@ bfloat16 uint16_to_bfloat16(uint16_t a, float_status *status)
     return uint64_to_bfloat16_scalbn(a, 0, status);
 }
 
+bfloat16 uint8_to_bfloat16(uint8_t a, float_status *status)
+{
+    return uint64_to_bfloat16_scalbn(a, 0, status);
+}
+
 float128 uint64_to_float128(uint64_t a, float_status *status)
 {
     FloatParts128 p;
--
2.34.1
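A caller-side sketch for the new conversions (my example, not part of the
patch; the rounding mode is set through the usual softfloat helper):

    #include "qemu/osdep.h"
    #include "fpu/softfloat.h"

    static int8_t bf16_to_byte(bfloat16 x)
    {
        float_status st = { };

        set_float_rounding_mode(float_round_nearest_even, &st);
        /* Saturates to INT8_MIN/INT8_MAX; NaN raises the invalid flag. */
        return bfloat16_to_int8(x, &st);
    }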
Right now the translator stops right *after* the end of a page, which
breaks reporting of fault locations when the last instruction of a
multi-insn translation block crosses a page boundary.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1155
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/riscv/translate.c          | 17 +++++--
 tests/tcg/riscv64/noexec.c        | 79 +++++++++++++++++++++++++++++++
 tests/tcg/riscv64/Makefile.target |  1 +
 3 files changed, 93 insertions(+), 4 deletions(-)
 create mode 100644 tests/tcg/riscv64/noexec.c

diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
     }
     ctx->nftemp = 0;
 
+    /* Only the first insn within a TB is allowed to cross a page boundary. */
     if (ctx->base.is_jmp == DISAS_NEXT) {
-        target_ulong page_start;
-
-        page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
-        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
+        if (!is_same_page(&ctx->base, ctx->base.pc_next)) {
             ctx->base.is_jmp = DISAS_TOO_MANY;
+        } else {
+            unsigned page_ofs = ctx->base.pc_next & ~TARGET_PAGE_MASK;
+
+            if (page_ofs > TARGET_PAGE_SIZE - MAX_INSN_LEN) {
+                uint16_t next_insn = cpu_lduw_code(env, ctx->base.pc_next);
+                int len = insn_len(next_insn);
+
+                if (!is_same_page(&ctx->base, ctx->base.pc_next + len)) {
+                    ctx->base.is_jmp = DISAS_TOO_MANY;
+                }
+            }
         }
     }
 }
diff --git a/tests/tcg/riscv64/noexec.c b/tests/tcg/riscv64/noexec.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/tcg/riscv64/noexec.c
@@ -XXX,XX +XXX,XX @@
+#include "../multiarch/noexec.c.inc"
+
+static void *arch_mcontext_pc(const mcontext_t *ctx)
+{
+    return (void *)ctx->__gregs[REG_PC];
+}
+
+static int arch_mcontext_arg(const mcontext_t *ctx)
+{
+    return ctx->__gregs[REG_A0];
+}
+
+static void arch_flush(void *p, int len)
+{
+    __builtin___clear_cache(p, p + len);
+}
+
+extern char noexec_1[];
+extern char noexec_2[];
+extern char noexec_end[];
+
+asm(".option push\n"
+    ".option norvc\n"
+    "noexec_1:\n"
+    "   li a0,1\n"       /* a0 is 0 on entry, set 1. */
+    "noexec_2:\n"
+    "   li a0,2\n"       /* a0 is 0/1; set 2. */
+    "   ret\n"
+    "noexec_end:\n"
+    ".option pop");
+
+int main(void)
+{
+    struct noexec_test noexec_tests[] = {
+        {
+            .name = "fallthrough",
+            .test_code = noexec_1,
+            .test_len = noexec_end - noexec_1,
+            .page_ofs = noexec_1 - noexec_2,
+            .entry_ofs = noexec_1 - noexec_2,
+            .expected_si_ofs = 0,
+            .expected_pc_ofs = 0,
+            .expected_arg = 1,
+        },
+        {
+            .name = "jump",
+            .test_code = noexec_1,
+            .test_len = noexec_end - noexec_1,
+            .page_ofs = noexec_1 - noexec_2,
+            .entry_ofs = 0,
+            .expected_si_ofs = 0,
+            .expected_pc_ofs = 0,
+            .expected_arg = 0,
+        },
+        {
+            .name = "fallthrough [cross]",
+            .test_code = noexec_1,
+            .test_len = noexec_end - noexec_1,
+            .page_ofs = noexec_1 - noexec_2 - 2,
+            .entry_ofs = noexec_1 - noexec_2 - 2,
+            .expected_si_ofs = 0,
+            .expected_pc_ofs = -2,
+            .expected_arg = 1,
+        },
+        {
+            .name = "jump [cross]",
+            .test_code = noexec_1,
+            .test_len = noexec_end - noexec_1,
+            .page_ofs = noexec_1 - noexec_2 - 2,
+            .entry_ofs = -2,
+            .expected_si_ofs = 0,
+            .expected_pc_ofs = -2,
+            .expected_arg = 0,
+        },
+    };
+
+    return test_noexec(noexec_tests,
+                       sizeof(noexec_tests) / sizeof(noexec_tests[0]));
+}
diff --git a/tests/tcg/riscv64/Makefile.target b/tests/tcg/riscv64/Makefile.target
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/riscv64/Makefile.target
+++ b/tests/tcg/riscv64/Makefile.target
@@ -XXX,XX +XXX,XX @@
 
 VPATH += $(SRC_PATH)/tests/tcg/riscv64
 TESTS += test-div
+TESTS += noexec
--
2.34.1
53
diff --git a/tests/tcg/m68k/denormal.c b/tests/tcg/m68k/denormal.c
48
new file mode 100644
54
new file mode 100644
49
index XXXXXXX..XXXXXXX
55
index XXXXXXX..XXXXXXX
50
--- /dev/null
56
--- /dev/null
51
+++ b/tests/tcg/riscv64/noexec.c
57
+++ b/tests/tcg/m68k/denormal.c
52
@@ -XXX,XX +XXX,XX @@
58
@@ -XXX,XX +XXX,XX @@
53
+#include "../multiarch/noexec.c.inc"
59
+/*
60
+ * Test m68k extended double denormals.
61
+ */
54
+
62
+
55
+static void *arch_mcontext_pc(const mcontext_t *ctx)
63
+#include <stdio.h>
64
+#include <stdint.h>
65
+
66
+#define TEST(X, Y) { X, Y, X * Y }
67
+
68
+static volatile long double test[][3] = {
69
+ TEST(0x1p+16383l, 0x1p-16446l),
70
+ TEST(0x1.1p-8223l, 0x1.1p-8224l),
71
+ TEST(1.0l, 0x1p-16383l),
72
+};
73
+
74
+#undef TEST
75
+
76
+static void dump_ld(const char *label, long double ld)
56
+{
77
+{
57
+ return (void *)ctx->__gregs[REG_PC];
78
+ union {
79
+ long double d;
80
+ struct {
81
+ uint32_t exp:16;
82
+ uint32_t space:16;
83
+ uint32_t h;
84
+ uint32_t l;
85
+ };
86
+ } u;
87
+
88
+ u.d = ld;
89
+ printf("%12s: % -27La 0x%04x 0x%08x 0x%08x\n", label, u.d, u.exp, u.h, u.l);
58
+}
90
+}
59
+
60
+static int arch_mcontext_arg(const mcontext_t *ctx)
61
+{
62
+ return ctx->__gregs[REG_A0];
63
+}
64
+
65
+static void arch_flush(void *p, int len)
66
+{
67
+ __builtin___clear_cache(p, p + len);
68
+}
69
+
70
+extern char noexec_1[];
71
+extern char noexec_2[];
72
+extern char noexec_end[];
73
+
74
+asm(".option push\n"
75
+ ".option norvc\n"
76
+ "noexec_1:\n"
77
+ " li a0,1\n" /* a0 is 0 on entry, set 1. */
78
+ "noexec_2:\n"
79
+ " li a0,2\n" /* a0 is 0/1; set 2. */
80
+ " ret\n"
81
+ "noexec_end:\n"
82
+ ".option pop");
83
+
91
+
84
+int main(void)
92
+int main(void)
85
+{
93
+{
86
+ struct noexec_test noexec_tests[] = {
94
+ int i, n = sizeof(test) / sizeof(test[0]), err = 0;
87
+ {
88
+ .name = "fallthrough",
89
+ .test_code = noexec_1,
90
+ .test_len = noexec_end - noexec_1,
91
+ .page_ofs = noexec_1 - noexec_2,
92
+ .entry_ofs = noexec_1 - noexec_2,
93
+ .expected_si_ofs = 0,
94
+ .expected_pc_ofs = 0,
95
+ .expected_arg = 1,
96
+ },
97
+ {
98
+ .name = "jump",
99
+ .test_code = noexec_1,
100
+ .test_len = noexec_end - noexec_1,
101
+ .page_ofs = noexec_1 - noexec_2,
102
+ .entry_ofs = 0,
103
+ .expected_si_ofs = 0,
104
+ .expected_pc_ofs = 0,
105
+ .expected_arg = 0,
106
+ },
107
+ {
108
+ .name = "fallthrough [cross]",
109
+ .test_code = noexec_1,
110
+ .test_len = noexec_end - noexec_1,
111
+ .page_ofs = noexec_1 - noexec_2 - 2,
112
+ .entry_ofs = noexec_1 - noexec_2 - 2,
113
+ .expected_si_ofs = 0,
114
+ .expected_pc_ofs = -2,
115
+ .expected_arg = 1,
116
+ },
117
+ {
118
+ .name = "jump [cross]",
119
+ .test_code = noexec_1,
120
+ .test_len = noexec_end - noexec_1,
121
+ .page_ofs = noexec_1 - noexec_2 - 2,
122
+ .entry_ofs = -2,
123
+ .expected_si_ofs = 0,
124
+ .expected_pc_ofs = -2,
125
+ .expected_arg = 0,
126
+ },
127
+ };
128
+
95
+
129
+ return test_noexec(noexec_tests,
96
+ for (i = 0; i < n; ++i) {
130
+ sizeof(noexec_tests) / sizeof(noexec_tests[0]));
97
+ long double x = test[i][0];
98
+ long double y = test[i][1];
99
+ long double build_mul = test[i][2];
100
+ long double runtime_mul = x * y;
101
+
102
+ if (runtime_mul != build_mul) {
103
+ dump_ld("x", x);
104
+ dump_ld("y", y);
105
+ dump_ld("build_mul", build_mul);
106
+ dump_ld("runtime_mul", runtime_mul);
107
+ err = 1;
108
+ }
109
+ }
110
+ return err;
131
+}
111
+}
132
diff --git a/tests/tcg/riscv64/Makefile.target b/tests/tcg/riscv64/Makefile.target
112
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
133
index XXXXXXX..XXXXXXX 100644
113
index XXXXXXX..XXXXXXX 100644
134
--- a/tests/tcg/riscv64/Makefile.target
114
--- a/fpu/softfloat-parts.c.inc
135
+++ b/tests/tcg/riscv64/Makefile.target
115
+++ b/fpu/softfloat-parts.c.inc
116
@@ -XXX,XX +XXX,XX @@ static void partsN(canonicalize)(FloatPartsN *p, float_status *status,
117
} else {
118
int shift = frac_normalize(p);
119
p->cls = float_class_normal;
120
- p->exp = fmt->frac_shift - fmt->exp_bias - shift + 1;
121
+ p->exp = fmt->frac_shift - fmt->exp_bias
122
+ - shift + !fmt->m68k_denormal;
123
}
124
} else if (likely(p->exp < fmt->exp_max) || fmt->arm_althp) {
125
p->cls = float_class_normal;
126
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
127
is_tiny = !frac_addi(&discard, p, inc);
128
}
129
130
- frac_shrjam(p, 1 - exp);
131
+ frac_shrjam(p, !fmt->m68k_denormal - exp);
132
133
if (p->frac_lo & round_mask) {
134
/* Need to recompute round-to-even/round-to-odd. */
135
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
136
p->frac_lo &= ~round_mask;
137
}
138
139
- exp = (p->frac_hi & DECOMPOSED_IMPLICIT_BIT) != 0;
140
+ exp = (p->frac_hi & DECOMPOSED_IMPLICIT_BIT) && !fmt->m68k_denormal;
141
frac_shr(p, frac_shift);
142
143
if (is_tiny && (flags & float_flag_inexact)) {
144
diff --git a/tests/tcg/m68k/Makefile.target b/tests/tcg/m68k/Makefile.target
145
index XXXXXXX..XXXXXXX 100644
146
--- a/tests/tcg/m68k/Makefile.target
147
+++ b/tests/tcg/m68k/Makefile.target
136
@@ -XXX,XX +XXX,XX @@
148
@@ -XXX,XX +XXX,XX @@
137
149
#
138
VPATH += $(SRC_PATH)/tests/tcg/riscv64
150
139
TESTS += test-div
151
VPATH += $(SRC_PATH)/tests/tcg/m68k
140
+TESTS += noexec
152
-TESTS += trap
153
+TESTS += trap denormal
154
155
# On m68k Linux supports 4k and 8k pages (but 8k is currently broken)
156
EXTRA_RUNS+=run-test-mmap-4096 # run-test-mmap-8192
141
--
157
--
142
2.34.1
158
2.34.1
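A worked instance of the difference (my arithmetic, not from the patch):
for the smallest extended-precision denormal, significand m = 1 with 63
fraction bits below the integer bit and biased exponent field 0,

    Intel:    value = m * 2^(1 - 16383 - 63) = 0x1p-16445
    Motorola: value = m * 2^(0 - 16383 - 63) = 0x1p-16446

which is why the test above expects products down to 0x1p-16446. A quick
host-side probe (assumes C11's LDBL_TRUE_MIN):

    #include <float.h>
    #include <stdio.h>

    int main(void)
    {
        /* Expect 0x1p-16446 on m68k, 0x1p-16445 on x86. */
        printf("%La\n", LDBL_TRUE_MIN);
        return 0;
    }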
This bit is not saved across interrupts, so we must
delay delivering the interrupt until the skip has
been processed.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1118
Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/avr/helper.c    |  9 +++++++++
 target/avr/translate.c | 26 ++++++++++++++++++++++----
 2 files changed, 31 insertions(+), 4 deletions(-)

diff --git a/target/avr/helper.c b/target/avr/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/helper.c
+++ b/target/avr/helper.c
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
     AVRCPU *cpu = AVR_CPU(cs);
     CPUAVRState *env = &cpu->env;
 
+    /*
+     * We cannot separate a skip from the next instruction,
+     * as the skip would not be preserved across the interrupt.
+     * Separating the two insns normally only happens at page boundaries.
+     */
+    if (env->skip) {
+        return false;
+    }
+
     if (interrupt_request & CPU_INTERRUPT_RESET) {
         if (cpu_interrupts_enabled(env)) {
             cs->exception_index = EXCP_RESET;
diff --git a/target/avr/translate.c b/target/avr/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/translate.c
+++ b/target/avr/translate.c
@@ -XXX,XX +XXX,XX @@ static void avr_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
     if (skip_label) {
         canonicalize_skip(ctx);
         gen_set_label(skip_label);
-        if (ctx->base.is_jmp == DISAS_NORETURN) {
+
+        switch (ctx->base.is_jmp) {
+        case DISAS_NORETURN:
             ctx->base.is_jmp = DISAS_CHAIN;
+            break;
+        case DISAS_NEXT:
+            if (ctx->base.tb->flags & TB_FLAGS_SKIP) {
+                ctx->base.is_jmp = DISAS_TOO_MANY;
+            }
+            break;
+        default:
+            break;
         }
     }
@@ -XXX,XX +XXX,XX @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
 {
     DisasContext *ctx = container_of(dcbase, DisasContext, base);
     bool nonconst_skip = canonicalize_skip(ctx);
+    /*
+     * Because we disable interrupts while env->skip is set,
+     * we must return to the main loop to re-evaluate afterward.
+     */
+    bool force_exit = ctx->base.tb->flags & TB_FLAGS_SKIP;
 
     switch (ctx->base.is_jmp) {
     case DISAS_NORETURN:
@@ -XXX,XX +XXX,XX @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
     case DISAS_NEXT:
     case DISAS_TOO_MANY:
     case DISAS_CHAIN:
-        if (!nonconst_skip) {
+        if (!nonconst_skip && !force_exit) {
             /* Note gen_goto_tb checks singlestep. */
             gen_goto_tb(ctx, 1, ctx->npc);
             break;
@@ -XXX,XX +XXX,XX @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
         tcg_gen_movi_tl(cpu_pc, ctx->npc);
         /* fall through */
     case DISAS_LOOKUP:
-        tcg_gen_lookup_and_goto_ptr();
-        break;
+        if (!force_exit) {
+            tcg_gen_lookup_and_goto_ptr();
+            break;
+        }
+        /* fall through */
     case DISAS_EXIT:
         tcg_gen_exit_tb(NULL, 0);
         break;
--
2.34.1

This hook may emit code at the beginning of the TB.

Suggested-by: Jordan Niethe <jniethe5@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        | 3 +++
 tcg/aarch64/tcg-target.c.inc     | 5 +++++
 tcg/arm/tcg-target.c.inc         | 5 +++++
 tcg/i386/tcg-target.c.inc        | 5 +++++
 tcg/loongarch64/tcg-target.c.inc | 5 +++++
 tcg/mips/tcg-target.c.inc        | 5 +++++
 tcg/ppc/tcg-target.c.inc         | 5 +++++
 tcg/riscv/tcg-target.c.inc       | 5 +++++
 tcg/s390x/tcg-target.c.inc       | 5 +++++
 tcg/sparc64/tcg-target.c.inc     | 5 +++++
 tcg/tci/tcg-target.c.inc         | 5 +++++
 11 files changed, 53 insertions(+)

diff --git a/tcg/tcg.c b/tcg/tcg.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -XXX,XX +XXX,XX @@ static void tcg_register_jit_int(const void *buf, size_t size,
     __attribute__((unused));
 
 /* Forward declarations for functions declared and used in tcg-target.c.inc. */
+static void tcg_out_tb_start(TCGContext *s);
 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                        intptr_t arg2);
 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
     s->gen_insn_data =
         tcg_malloc(sizeof(uint64_t) * s->gen_tb->icount * start_words);
 
+    tcg_out_tb_start(s);
+
     num_insns = -1;
     QTAILQ_FOREACH(op, &s->ops, link) {
         TCGOpcode opc = op->opc;
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out_insn(s, 3207, RET, TCG_REG_LR);
 }
 
+static void tcg_out_tb_start(TCGContext *s)
+{
+    /* nothing to do */
+}
+
 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
 {
     int i;
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_epilogue(TCGContext *s)
                   (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
 }
 
+static void tcg_out_tb_start(TCGContext *s)
+{
+    /* nothing to do */
+}
+
 typedef struct {
     DebugFrameHeader h;
     uint8_t fde_def_cfa[4];
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out_opc(s, OPC_RET, 0, 0, 0);
 }
 
+static void tcg_out_tb_start(TCGContext *s)
+{
+    /* nothing to do */
+}
+
 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
 {
     memset(p, 0x90, count);
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
 }
 
+static void tcg_out_tb_start(TCGContext *s)
+{
+    /* nothing to do */
+}
+
 static void tcg_target_init(TCGContext *s)
 {
     unsigned long hwcap = qemu_getauxval(AT_HWCAP);
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
 }
 
+static void tcg_out_tb_start(TCGContext *s)
+{
+    /* nothing to do */
+}
+
 static void tcg_target_init(TCGContext *s)
 {
     tcg_target_detect_isa();
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out32(s, BCLR | BO_ALWAYS);
 }
 
+static void tcg_out_tb_start(TCGContext *s)
+{
+    /* nothing to do */
+}
+
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
 {
     tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, arg);
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
 }
 
+static void tcg_out_tb_start(TCGContext *s)
+{
+    /* nothing to do */
+}
+
 static volatile sig_atomic_t got_sigill;
 
 static void sigill_handler(int signo, siginfo_t *si, void *data)
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
 }
 
+static void tcg_out_tb_start(TCGContext *s)
+{
+    /* nothing to do */
+}
+
 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
 {
     memset(p, 0x07, count * sizeof(tcg_insn_unit));
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
     tcg_out_movi_s13(s, TCG_REG_O0, 0);
 }
 
+static void tcg_out_tb_start(TCGContext *s)
+{
+    /* nothing to do */
+}
+
 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
 {
     int i;
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_target_qemu_prologue(TCGContext *s)
 {
 }
 
+static void tcg_out_tb_start(TCGContext *s)
+{
+    /* nothing to do */
+}
+
 bool tcg_target_has_memory_bswap(MemOp memop)
 {
     return true;
--
2.34.1
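All backends start with an empty hook; one that needs per-TB entry code
simply fills it in. The aarch64 patch later in this series does exactly
that, emitting a BTI landing pad:

    static void tcg_out_tb_start(TCGContext *s)
    {
        tcg_out_bti(s, BTI_J);    /* from the aarch64 BTI patch below */
    }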
There is no need to go through cc->tcg_ops when
we know what value it must have.

Reviewed-by: Michael Rolnik <mrolnik@gmail.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/avr/helper.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/target/avr/helper.c b/target/avr/helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/helper.c
+++ b/target/avr/helper.c
@@ -XXX,XX +XXX,XX @@
 bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 {
     bool ret = false;
-    CPUClass *cc = CPU_GET_CLASS(cs);
     AVRCPU *cpu = AVR_CPU(cs);
     CPUAVRState *env = &cpu->env;
 
     if (interrupt_request & CPU_INTERRUPT_RESET) {
         if (cpu_interrupts_enabled(env)) {
             cs->exception_index = EXCP_RESET;
-            cc->tcg_ops->do_interrupt(cs);
+            avr_cpu_do_interrupt(cs);
 
             cs->interrupt_request &= ~CPU_INTERRUPT_RESET;
 
@@ -XXX,XX +XXX,XX @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
         if (cpu_interrupts_enabled(env) && env->intsrc != 0) {
             int index = ctz32(env->intsrc);
             cs->exception_index = EXCP_INT(index);
-            cc->tcg_ops->do_interrupt(cs);
+            avr_cpu_do_interrupt(cs);
 
             env->intsrc &= env->intsrc - 1; /* clear the interrupt */
             if (!env->intsrc) {
--
2.34.1

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 host/include/aarch64/host/cpuinfo.h | 1 +
 util/cpuinfo-aarch64.c              | 7 +++++++
 2 files changed, 8 insertions(+)

diff --git a/host/include/aarch64/host/cpuinfo.h b/host/include/aarch64/host/cpuinfo.h
index XXXXXXX..XXXXXXX 100644
--- a/host/include/aarch64/host/cpuinfo.h
+++ b/host/include/aarch64/host/cpuinfo.h
@@ -XXX,XX +XXX,XX @@
 #define CPUINFO_LSE (1u << 1)
 #define CPUINFO_LSE2 (1u << 2)
 #define CPUINFO_AES (1u << 3)
+#define CPUINFO_BTI (1u << 4)
 
 /* Initialized with a constructor. */
 extern unsigned cpuinfo;
diff --git a/util/cpuinfo-aarch64.c b/util/cpuinfo-aarch64.c
index XXXXXXX..XXXXXXX 100644
--- a/util/cpuinfo-aarch64.c
+++ b/util/cpuinfo-aarch64.c
@@ -XXX,XX +XXX,XX @@
 # include <asm/hwcap.h>
 # include "elf.h"
 # endif
+# ifndef HWCAP2_BTI
+# define HWCAP2_BTI 0  /* added in glibc 2.32 */
+# endif
 #endif
 #ifdef CONFIG_DARWIN
 # include <sys/sysctl.h>
@@ -XXX,XX +XXX,XX @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
     info |= (hwcap & HWCAP_ATOMICS ? CPUINFO_LSE : 0);
     info |= (hwcap & HWCAP_USCAT ? CPUINFO_LSE2 : 0);
     info |= (hwcap & HWCAP_AES ? CPUINFO_AES: 0);
+
+    unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);
+    info |= (hwcap2 & HWCAP2_BTI ? CPUINFO_BTI : 0);
 #endif
 #ifdef CONFIG_DARWIN
     info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE") * CPUINFO_LSE;
     info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE2") * CPUINFO_LSE2;
     info |= sysctl_for_bool("hw.optional.arm.FEAT_AES") * CPUINFO_AES;
+    info |= sysctl_for_bool("hw.optional.arm.FEAT_BTI") * CPUINFO_BTI;
 #endif
 
     cpuinfo = info;
--
2.34.1
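A standalone probe mirroring the Linux path above (sketch; the bit value
of HWCAP2_BTI is taken from current kernel headers and is an assumption
here, which is why the patch itself guards with #ifndef):

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP2_BTI
    #define HWCAP2_BTI (1 << 17)   /* assumed value; absent in old glibc */
    #endif

    int main(void)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        printf("FEAT_BTI: %s\n", (hwcap2 & HWCAP2_BTI) ? "yes" : "no");
        return 0;
    }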
The function is not used outside of cpu-exec.c. Move it and
its subroutines up in the file, before the first use.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/exec-all.h |   3 -
 accel/tcg/cpu-exec.c    | 122 ++++++++++++++++++++--------------------
 2 files changed, 61 insertions(+), 64 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
 #endif
 void tb_flush(CPUState *cpu);
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
-TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
-                                   target_ulong cs_base, uint32_t flags,
-                                   uint32_t cflags);
 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
 
 /* GETPC is the true target of the return instruction that we'll execute. */
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@ uint32_t curr_cflags(CPUState *cpu)
     return cflags;
 }
 
+struct tb_desc {
+    target_ulong pc;
+    target_ulong cs_base;
+    CPUArchState *env;
+    tb_page_addr_t phys_page1;
+    uint32_t flags;
+    uint32_t cflags;
+    uint32_t trace_vcpu_dstate;
+};
+
+static bool tb_lookup_cmp(const void *p, const void *d)
+{
+    const TranslationBlock *tb = p;
+    const struct tb_desc *desc = d;
+
+    if (tb->pc == desc->pc &&
+        tb->page_addr[0] == desc->phys_page1 &&
+        tb->cs_base == desc->cs_base &&
+        tb->flags == desc->flags &&
+        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
+        tb_cflags(tb) == desc->cflags) {
+        /* check next page if needed */
+        if (tb->page_addr[1] == -1) {
+            return true;
+        } else {
+            tb_page_addr_t phys_page2;
+            target_ulong virt_page2;
+
+            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+            phys_page2 = get_page_addr_code(desc->env, virt_page2);
+            if (tb->page_addr[1] == phys_page2) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
+                                          target_ulong cs_base, uint32_t flags,
+                                          uint32_t cflags)
+{
+    tb_page_addr_t phys_pc;
+    struct tb_desc desc;
+    uint32_t h;
+
+    desc.env = cpu->env_ptr;
+    desc.cs_base = cs_base;
+    desc.flags = flags;
+    desc.cflags = cflags;
+    desc.trace_vcpu_dstate = *cpu->trace_dstate;
+    desc.pc = pc;
+    phys_pc = get_page_addr_code(desc.env, pc);
+    if (phys_pc == -1) {
+        return NULL;
+    }
+    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
+    h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
+    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
+}
+
 /* Might cause an exception, so have a longjmp destination ready */
 static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
                                           target_ulong cs_base,
@@ -XXX,XX +XXX,XX @@ void cpu_exec_step_atomic(CPUState *cpu)
         end_exclusive();
     }
 }
 
-struct tb_desc {
-    target_ulong pc;
-    target_ulong cs_base;
-    CPUArchState *env;
-    tb_page_addr_t phys_page1;
-    uint32_t flags;
-    uint32_t cflags;
-    uint32_t trace_vcpu_dstate;
-};
-
-static bool tb_lookup_cmp(const void *p, const void *d)
-{
-    const TranslationBlock *tb = p;
-    const struct tb_desc *desc = d;
-
-    if (tb->pc == desc->pc &&
-        tb->page_addr[0] == desc->phys_page1 &&
-        tb->cs_base == desc->cs_base &&
-        tb->flags == desc->flags &&
-        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
-        tb_cflags(tb) == desc->cflags) {
-        /* check next page if needed */
-        if (tb->page_addr[1] == -1) {
-            return true;
-        } else {
-            tb_page_addr_t phys_page2;
-            target_ulong virt_page2;
-
-            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
-            phys_page2 = get_page_addr_code(desc->env, virt_page2);
-            if (tb->page_addr[1] == phys_page2) {
-                return true;
-            }
-        }
-    }
-    return false;
-}
-
-TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
-                                   target_ulong cs_base, uint32_t flags,
-                                   uint32_t cflags)
-{
-    tb_page_addr_t phys_pc;
-    struct tb_desc desc;
-    uint32_t h;
-
-    desc.env = cpu->env_ptr;
-    desc.cs_base = cs_base;
-    desc.flags = flags;
-    desc.cflags = cflags;
-    desc.trace_vcpu_dstate = *cpu->trace_dstate;
-    desc.pc = pc;
-    phys_pc = get_page_addr_code(desc.env, pc);
-    if (phys_pc == -1) {
-        return NULL;
-    }
-    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
-    h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
-    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
-}
-
 void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
 {
     if (TCG_TARGET_HAS_direct_jump) {
--
2.34.1

The prologue is entered via "call"; the epilogue, each tb,
and each goto_tb continuation point are all reached via "jump".

As tcg_out_goto_long is only used by tcg_out_exit_tb, merge
the two functions. Change the indirect register used to
TCG_REG_TMP1, aka X17, so that the BTI condition created
is "jump" instead of "jump or call".

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/aarch64/tcg-target.c.inc | 54 ++++++++++++++++++++++++++----------
 1 file changed, 39 insertions(+), 15 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum {
     DMB_ISH = 0xd50338bf,
     DMB_LD = 0x00000100,
     DMB_ST = 0x00000200,
+
+    BTI_C = 0xd503245f,
+    BTI_J = 0xd503249f,
+    BTI_JC = 0xd50324df,
 } AArch64Insn;
 
 static inline uint32_t tcg_in32(TCGContext *s)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn,
               | rn << 5 | (rd & 0x1f));
 }
 
+static void tcg_out_bti(TCGContext *s, AArch64Insn insn)
+{
+    /*
+     * While BTI insns are nops on hosts without FEAT_BTI,
+     * there is no point in emitting them in that case either.
+     */
+    if (cpuinfo & CPUINFO_BTI) {
+        tcg_out32(s, insn);
+    }
+}
+
 /* Register to register move using ORR (shifted register with no shift). */
 static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rm)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
     tcg_out_insn(s, 3206, B, offset);
 }
 
-static void tcg_out_goto_long(TCGContext *s, const tcg_insn_unit *target)
-{
-    ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
-    if (offset == sextract64(offset, 0, 26)) {
-        tcg_out_insn(s, 3206, B, offset);
-    } else {
-        /* Choose X9 as a call-clobbered non-LR temporary. */
-        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X9, (intptr_t)target);
-        tcg_out_insn(s, 3207, BR, TCG_REG_X9);
-    }
-}
-
 static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *target)
 {
     ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
@@ -XXX,XX +XXX,XX @@ static const tcg_insn_unit *tb_ret_addr;
 
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
 {
+    const tcg_insn_unit *target;
+    ptrdiff_t offset;
+
     /* Reuse the zeroing that exists for goto_ptr. */
     if (a0 == 0) {
-        tcg_out_goto_long(s, tcg_code_gen_epilogue);
+        target = tcg_code_gen_epilogue;
     } else {
         tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
-        tcg_out_goto_long(s, tb_ret_addr);
+        target = tb_ret_addr;
+    }
+
+    offset = tcg_pcrel_diff(s, target) >> 2;
+    if (offset == sextract64(offset, 0, 26)) {
+        tcg_out_insn(s, 3206, B, offset);
+    } else {
+        /*
+         * Only x16/x17 generate BTI type Jump (2),
+         * other registers generate BTI type Jump|Call (3).
+         */
+        QEMU_BUILD_BUG_ON(TCG_REG_TMP0 != TCG_REG_X16);
+        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, (intptr_t)target);
+        tcg_out_insn(s, 3207, BR, TCG_REG_TMP0);
     }
 }
 
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
     tcg_out32(s, I3206_B);
     tcg_out_insn(s, 3207, BR, TCG_REG_TMP0);
     set_jmp_reset_offset(s, which);
+    tcg_out_bti(s, BTI_J);
 }
 
 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
 {
     TCGReg r;
 
+    tcg_out_bti(s, BTI_C);
+
     /* Push (FP, LR) and allocate space for all saved registers. */
     tcg_out_insn(s, 3314, STP, TCG_REG_FP, TCG_REG_LR,
                  TCG_REG_SP, -PUSH_SIZE, 1, 1);
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
      * and fall through to the rest of the epilogue.
      */
     tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
+    tcg_out_bti(s, BTI_J);
     tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_X0, 0);
 
     /* TB epilogue */
     tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
+    tcg_out_bti(s, BTI_J);
 
     /* Remove TCG locals stack space. */
     tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP,
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
 
 static void tcg_out_tb_start(TCGContext *s)
 {
-    /* nothing to do */
+    tcg_out_bti(s, BTI_J);
 }
 
 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
--
2.34.1
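For reference, the three landing-pad encodings added above, and a sketch
of how a jump target begins (semantics per the patch's own comment: BR
via x16/x17 produces the "jump" branch type, which a BTI J pad accepts):

    /* BTI c  = 0xd503245f   pad for call-style arrivals (prologue)      */
    /* BTI j  = 0xd503249f   pad for jump-style arrivals (TBs, epilogue) */
    /* BTI jc = 0xd50324df   pad accepting either                        */

    static void emit_jump_landing_pad(TCGContext *s)
    {
        if (cpuinfo & CPUINFO_BTI) {    /* a nop without FEAT_BTI: skip */
            tcg_out32(s, 0xd503249f);   /* BTI j */
        }
    }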
We're about to start validating PAGE_EXEC, which means that we've
got to mark the vsyscall page executable. We had been special
casing this entirely within translate.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/elfload.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index XXXXXXX..XXXXXXX 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -XXX,XX +XXX,XX @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *en
     (*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff);
 }
 
+#if ULONG_MAX >= TARGET_VSYSCALL_PAGE
+#define INIT_GUEST_COMMPAGE
+static bool init_guest_commpage(void)
+{
+    /*
+     * The vsyscall page is at a high negative address aka kernel space,
+     * which means that we cannot actually allocate it with target_mmap.
+     * We still should be able to use page_set_flags, unless the user
+     * has specified -R reserved_va, which would trigger an assert().
+     */
+    if (reserved_va != 0 &&
+        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) {
+        error_report("Cannot allocate vsyscall page");
+        exit(EXIT_FAILURE);
+    }
+    page_set_flags(TARGET_VSYSCALL_PAGE,
+                   TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE,
+                   PAGE_EXEC | PAGE_VALID);
+    return true;
+}
+#endif
 #else
 
 #define ELF_START_MMAP 0x80000000
@@ -XXX,XX +XXX,XX @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
 #else
 #define HI_COMMPAGE 0
 #define LO_COMMPAGE -1
+#ifndef INIT_GUEST_COMMPAGE
 #define init_guest_commpage() true
 #endif
+#endif
 
 static void pgb_fail_in_use(const char *image_name)
 {
--
2.34.1

For linux aarch64 host supporting BTI, map the buffer
to require BTI instructions at branch landing pads.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/region.c | 41 ++++++++++++++++++++++++++++++-----------
 1 file changed, 30 insertions(+), 11 deletions(-)

diff --git a/tcg/region.c b/tcg/region.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/region.c
+++ b/tcg/region.c
@@ -XXX,XX +XXX,XX @@
 #include "tcg/tcg.h"
 #include "exec/translation-block.h"
 #include "tcg-internal.h"
+#include "host/cpuinfo.h"
 
+
+/*
+ * Local source-level compatibility with Unix.
+ * Used by tcg_region_init below.
+ */
+#if defined(_WIN32)
+#define PROT_READ 1
+#define PROT_WRITE 2
+#define PROT_EXEC 4
+#endif
+
 struct tcg_region_tree {
     QemuMutex lock;
     QTree *tree;
@@ -XXX,XX +XXX,XX @@ bool in_code_gen_buffer(const void *p)
     return (size_t)(p - region.start_aligned) <= region.total_size;
 }
 
+#ifndef CONFIG_TCG_INTERPRETER
+static int host_prot_read_exec(void)
+{
+#if defined(CONFIG_LINUX) && defined(HOST_AARCH64) && defined(PROT_BTI)
+    if (cpuinfo & CPUINFO_BTI) {
+        return PROT_READ | PROT_EXEC | PROT_BTI;
+    }
+#endif
+    return PROT_READ | PROT_EXEC;
+}
+#endif
+
 #ifdef CONFIG_DEBUG_TCG
 const void *tcg_splitwx_to_rx(void *rw)
 {
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
     return PROT_READ | PROT_WRITE;
 }
 #elif defined(_WIN32)
-/*
- * Local source-level compatibility with Unix.
- * Used by tcg_region_init below.
- */
-#define PROT_READ 1
-#define PROT_WRITE 2
-#define PROT_EXEC 4
-
 static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
 {
     void *buf;
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
         goto fail;
     }
 
-    buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
+    buf_rx = mmap(NULL, size, host_prot_read_exec(), MAP_SHARED, fd, 0);
     if (buf_rx == MAP_FAILED) {
         goto fail_rx;
     }
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
         return -1;
     }
 
-    if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
+    if (mprotect((void *)buf_rx, size, host_prot_read_exec()) != 0) {
         error_setg_errno(errp, errno, "mprotect for jit splitwx");
         munmap((void *)buf_rx, size);
         munmap((void *)buf_rw, size);
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
     need_prot = PROT_READ | PROT_WRITE;
 #ifndef CONFIG_TCG_INTERPRETER
     if (tcg_splitwx_diff == 0) {
-        need_prot |= PROT_EXEC;
+        need_prot |= host_prot_read_exec();
     }
 #endif
     for (size_t i = 0, n = region.n; i < n; i++) {
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
         } else if (need_prot == (PROT_READ | PROT_WRITE)) {
             rc = qemu_mprotect_rw(start, end - start);
         } else {
+#ifdef CONFIG_POSIX
+            rc = mprotect(start, end - start, need_prot);
+#else
             g_assert_not_reached();
+#endif
         }
         if (rc) {
             error_setg_errno(&error_fatal, errno,
--
2.34.1
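The mapping pattern above, reduced to a minimal sketch (assumptions:
Linux aarch64, PROT_BTI from <asm/mman.h>, and a pre-created buffer fd
as in the splitwx/memfd path):

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>
    #ifdef __aarch64__
    #include <asm/mman.h>    /* PROT_BTI */
    #endif

    static void *map_exec_buffer(size_t size, int fd, bool have_bti)
    {
        int prot = PROT_READ | PROT_EXEC;

    #ifdef PROT_BTI
        if (have_bti) {
            prot |= PROT_BTI;    /* require BTI pads on this mapping */
        }
    #endif
        return mmap(NULL, size, prot, MAP_SHARED, fd, 0);
    }

Only executable mappings are affected; the RW alias of the split-w^x
buffer is mapped without PROT_EXEC and needs no change.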