The following changes since commit 005ad32358f12fe9313a4a01918a55e60d4f39e5:

  Merge tag 'pull-tpm-2023-09-12-3' of https://github.com/stefanberger/qemu-tpm into staging (2023-09-13 13:41:57 -0400)

are available in the Git repository at:

  https://gitlab.com/rth7680/qemu.git tags/pull-tcg-20230915

for you to fetch changes up to e0d9f49c143359b4a34cb80737af57228c62a008:

  accel/tcg: Restrict tcg_exec_[un]realizefn() to TCG (2023-09-15 19:06:29 -0700)

----------------------------------------------------------------
*: Delete checks for old host definitions
tcg/loongarch64: Generate LSX instructions
fpu: Add conversions between bfloat16 and [u]int8
fpu: Handle m68k extended precision denormals properly
accel/tcg: Improve cputlb i/o organization
accel/tcg: Simplify tlb_plugin_lookup
accel/tcg: Remove false-negative halted assertion
tcg: Add gvec compare with immediate and scalar operand
tcg/aarch64: Emit BTI insns at jump landing pads

----------------------------------------------------------------
Akihiko Odaki (3):
      util: Delete checks for old host definitions
      softmmu: Delete checks for old host definitions
      thunk: Delete checks for old host definitions

Anton Johansson (9):
      target/arm: Replace TARGET_PAGE_ENTRY_EXTRA
      accel/tcg: Modify tlb_*() to use CPUState
      accel/tcg: Modify probe_access_internal() to use CPUState
      accel/tcg: Modify memory access functions to use CPUState
      accel/tcg: Modify atomic_mmu_lookup() to use CPUState
      accel/tcg: Use CPUState in atomicity helpers
      accel/tcg: Remove env_tlb()
      accel/tcg: Unify user and softmmu do_[st|ld]*_mmu()
      accel/tcg: move ld/st helpers to ldst_common.c.inc

Jiajie Chen (16):
      tcg/loongarch64: Import LSX instructions
      tcg/loongarch64: Lower basic tcg vec ops to LSX
      tcg: pass vece to tcg_target_const_match()
      tcg/loongarch64: Lower cmp_vec to vseq/vsle/vslt
      tcg/loongarch64: Lower add/sub_vec to vadd/vsub
      tcg/loongarch64: Lower vector bitwise operations
      tcg/loongarch64: Lower neg_vec to vneg
      tcg/loongarch64: Lower mul_vec to vmul
      tcg/loongarch64: Lower vector min max ops
      tcg/loongarch64: Lower vector saturated ops
      tcg/loongarch64: Lower vector shift vector ops
      tcg/loongarch64: Lower bitsel_vec to vbitsel
      tcg/loongarch64: Lower vector shift integer ops
      tcg/loongarch64: Lower rotv_vec ops to LSX
      tcg/loongarch64: Lower rotli_vec to vrotri
      tcg/loongarch64: Implement 128-bit load & store

LIU Zhiwei (2):
      accel/tcg: Fix the comment for CPUTLBEntryFull
      fpu: Add conversions between bfloat16 and [u]int8

Nicholas Piggin (1):
      accel/tcg: mttcg remove false-negative halted assertion

Philippe Mathieu-Daudé (16):
      exec: Make EXCP_FOO definitions target agnostic
      exec: Move cpu_loop_foo() target agnostic functions to 'cpu-common.h'
      accel/tcg: Restrict dump_exec_info() declaration
      accel: Make accel-blocker.o target agnostic
      accel: Rename accel-common.c -> accel-target.c
      exec: Rename cpu.c -> cpu-target.c
      exec: Rename target specific page-vary.c -> page-vary-target.c
      accel/tcg: Rename target-specific 'internal.h' -> 'internal-target.h'
      accel/tcg: Make monitor.c a target-agnostic unit
      accel/tcg: Make icount.o a target agnostic unit
      accel/tcg: Make cpu-exec-common.c a target agnostic unit
      accel: Rename accel_cpu_realizefn() -> accel_cpu_realize()
      accel: Introduce accel_cpu_unrealize() stub
      accel: Declare AccelClass::[un]realize_cpu() handlers
      accel/tcg: Have tcg_exec_realizefn() return a boolean
      accel/tcg: Restrict tcg_exec_[un]realizefn() to TCG

Richard Henderson (31):
      tcg: Add gvec compare with immediate and scalar operand
      target/arm: Use tcg_gen_gvec_cmpi for compare vs 0
      accel/tcg: Simplify tlb_plugin_lookup
      accel/tcg: Split out io_prepare and io_failed
      accel/tcg: Use CPUTLBEntryFull.phys_addr in io_failed
      plugin: Simplify struct qemu_plugin_hwaddr
      accel/tcg: Merge cpu_transaction_failed into io_failed
      accel/tcg: Replace direct use of io_readx/io_writex in do_{ld,st}_1
      accel/tcg: Merge io_readx into do_ld_mmio_beN
      accel/tcg: Merge io_writex into do_st_mmio_leN
      accel/tcg: Introduce do_ld16_mmio_beN
      accel/tcg: Introduce do_st16_mmio_leN
      fpu: Handle m68k extended precision denormals properly
      tcg: Add tcg_out_tb_start backend hook
      util/cpuinfo-aarch64: Add CPUINFO_BTI
      tcg/aarch64: Emit BTI insns at jump landing pads
      tcg: Map code_gen_buffer with PROT_BTI
      accel/tcg: Move CPUTLB definitions from cpu-defs.h
      qom: Propagate alignment through type system
      target/arm: Remove size and alignment for cpu subclasses
      target/*: Add instance_align to all cpu base classes
      accel/tcg: Validate placement of CPUNegativeOffsetState
      accel/tcg: Move CPUNegativeOffsetState into CPUState
      accel/tcg: Remove CPUState.icount_decr_ptr
      accel/tcg: Move can_do_io to CPUNegativeOffsetState
      accel/tcg: Remove cpu_neg()
      tcg: Rename cpu_env to tcg_env
      accel/tcg: Replace CPUState.env_ptr with cpu_env()
      accel/tcg: Remove cpu_set_cpustate_pointers
      accel/tcg: Remove env_neg()
      tcg: Remove TCGContext.tlb_fast_offset

 MAINTAINERS | 7 +-
 meson.build | 6 +-
 accel/tcg/atomic_template.h | 20 +-
 accel/tcg/internal-common.h | 28 +
 accel/tcg/{internal.h => internal-target.h} | 21 +-
 accel/tcg/tcg-runtime.h | 25 +
 host/include/aarch64/host/cpuinfo.h | 1 +
 include/exec/cpu-all.h | 67 +-
 include/exec/cpu-common.h | 39 +
 include/exec/cpu-defs.h | 138 -
 include/exec/cpu_ldst.h | 8 +-
 include/exec/exec-all.h | 32 +-
 include/exec/user/thunk.h | 3 +-
 include/fpu/softfloat.h | 12 +
 include/hw/core/cpu.h | 180 +-
 include/qemu/accel.h | 12 +-
 include/qemu/plugin-memory.h | 11 +-
 include/qemu/typedefs.h | 1 -
 include/tcg/tcg-op-gvec-common.h | 6 +
 include/tcg/tcg.h | 3 +-
 target/alpha/cpu.h | 1 -
 target/arm/common-semi-target.h | 2 +-
 target/arm/cpu-param.h | 12 -
 target/arm/cpu.h | 1 -
 target/arm/tcg/translate-a32.h | 2 +-
 target/arm/tcg/translate-a64.h | 4 +-
 target/arm/tcg/translate.h | 16 +-
 target/avr/cpu.h | 1 -
 target/cris/cpu.h | 1 -
 target/hexagon/cpu.h | 2 +-
 target/hexagon/gen_tcg.h | 120 +-
 target/hexagon/gen_tcg_hvx.h | 20 +-
 target/hexagon/macros.h | 8 +-
 target/hppa/cpu.h | 1 -
 target/i386/cpu.h | 1 -
 target/loongarch/cpu.h | 1 -
 target/m68k/cpu.h | 1 -
 target/microblaze/cpu.h | 6 +-
 target/mips/cpu.h | 4 +-
 target/mips/tcg/translate.h | 6 +-
 target/nios2/cpu.h | 1 -
 target/openrisc/cpu.h | 1 -
 target/ppc/cpu.h | 1 -
 target/riscv/cpu.h | 2 +-
 target/rx/cpu.h | 1 -
 target/s390x/cpu.h | 1 -
 target/sh4/cpu.h | 1 -
 target/sparc/cpu.h | 1 -
 target/tricore/cpu.h | 1 -
 target/xtensa/cpu.h | 3 +-
 tcg/loongarch64/tcg-target-con-set.h | 9 +
 tcg/loongarch64/tcg-target-con-str.h | 3 +
 tcg/loongarch64/tcg-target.h | 40 +-
 tcg/loongarch64/tcg-target.opc.h | 12 +
 accel/{accel-common.c => accel-target.c} | 27 +-
 accel/dummy-cpus.c | 2 +-
 accel/kvm/kvm-accel-ops.c | 2 +-
 accel/tcg/cpu-exec-common.c | 5 +-
 accel/tcg/cpu-exec.c | 31 +-
 accel/tcg/cputlb.c | 1156 ++--
 softmmu/icount.c => accel/tcg/icount-common.c | 7 +-
 accel/tcg/monitor.c | 2 +-
 accel/tcg/plugin-gen.c | 10 +-
 accel/tcg/tb-maint.c | 3 +-
 accel/tcg/tcg-accel-ops-icount.c | 8 +-
 accel/tcg/tcg-accel-ops-mttcg.c | 11 +-
 accel/tcg/tcg-accel-ops-rr.c | 4 +-
 accel/tcg/tcg-accel-ops.c | 2 +-
 accel/tcg/tcg-all.c | 4 +-
 accel/tcg/tcg-runtime-gvec.c | 26 +
 accel/tcg/translate-all.c | 15 +-
 accel/tcg/translator.c | 22 +-
 accel/tcg/user-exec.c | 279 +-
 bsd-user/main.c | 2 +-
 bsd-user/signal.c | 10 +-
 cpus-common.c => cpu-common.c | 0
 cpu.c => cpu-target.c | 13 +-
 fpu/softfloat.c | 67 +-
 gdbstub/gdbstub.c | 4 +-
 gdbstub/user-target.c | 2 +-
 hw/core/cpu-common.c | 6 +-
 hw/i386/kvm/clock.c | 2 +-
 hw/intc/mips_gic.c | 2 +-
 hw/intc/riscv_aclint.c | 12 +-
 hw/intc/riscv_imsic.c | 2 +-
 hw/ppc/e500.c | 4 +-
 hw/ppc/spapr.c | 2 +-
 linux-user/elfload.c | 4 +-
 linux-user/i386/cpu_loop.c | 2 +-
 linux-user/main.c | 4 +-
 linux-user/signal.c | 15 +-
 monitor/hmp-cmds-target.c | 2 +-
 page-vary.c => page-vary-target.c | 0
 plugins/api.c | 27 +-
 qom/object.c | 14 +
 semihosting/arm-compat-semi.c | 6 +-
 semihosting/syscalls.c | 28 +-
 softmmu/async-teardown.c | 3 -
 softmmu/watchpoint.c | 2 +-
 target/alpha/cpu.c | 3 +-
 target/alpha/translate.c | 146 +-
 target/arm/cpu.c | 12 +-
 target/arm/cpu64.c | 4 -
 target/arm/helper.c | 2 +-
 target/arm/ptw.c | 4 +-
 target/arm/tcg/mte_helper.c | 2 +-
 target/arm/tcg/sve_helper.c | 2 +-
 target/arm/tcg/tlb_helper.c | 4 +-
 target/arm/tcg/translate-a64.c | 380 +-
 target/arm/tcg/translate-m-nocp.c | 24 +-
 target/arm/tcg/translate-mve.c | 52 +-
 target/arm/tcg/translate-neon.c | 78 +-
 target/arm/tcg/translate-sme.c | 8 +-
 target/arm/tcg/translate-sve.c | 172 +-
 target/arm/tcg/translate-vfp.c | 56 +-
 target/arm/tcg/translate.c | 290 +-
 target/avr/cpu.c | 3 +-
 target/avr/translate.c | 66 +-
 target/cris/cpu.c | 3 +-
 target/cris/translate.c | 72 +-
 target/hexagon/cpu.c | 4 +-
 target/hexagon/genptr.c | 36 +-
 target/hexagon/idef-parser/parser-helpers.c | 2 +-
 target/hexagon/translate.c | 52 +-
 target/hppa/cpu.c | 2 +-
 target/hppa/mem_helper.c | 2 +-
 target/hppa/translate.c | 148 +-
 target/i386/cpu.c | 2 +-
 target/i386/kvm/kvm-cpu.c | 2 +-
 target/i386/nvmm/nvmm-all.c | 14 +-
 target/i386/tcg/sysemu/excp_helper.c | 2 +-
 target/i386/tcg/tcg-cpu.c | 2 +-
 target/i386/tcg/translate.c | 584 +-
 target/i386/whpx/whpx-all.c | 26 +-
 target/loongarch/cpu.c | 9 +-
 target/loongarch/translate.c | 22 +-
 target/m68k/cpu.c | 9 +-
 target/m68k/translate.c | 306 +-
 target/microblaze/cpu.c | 2 +-
 target/microblaze/translate.c | 52 +-
 target/mips/cpu.c | 2 +-
 target/mips/tcg/lcsr_translate.c | 6 +-
 target/mips/tcg/msa_translate.c | 34 +-
 target/mips/tcg/mxu_translate.c | 4 +-
 target/mips/tcg/sysemu/mips-semi.c | 4 +-
 target/mips/tcg/translate.c | 1288 ++---
 target/mips/tcg/vr54xx_translate.c | 2 +-
 target/nios2/cpu.c | 5 +-
 target/nios2/translate.c | 52 +-
 target/openrisc/cpu.c | 7 +-
 target/openrisc/translate.c | 86 +-
 target/ppc/cpu_init.c | 1 -
 target/ppc/excp_helper.c | 10 +-
 target/ppc/translate.c | 366 +-
 target/riscv/cpu.c | 8 +-
 target/riscv/translate.c | 56 +-
 target/rx/cpu.c | 5 +-
 target/rx/translate.c | 58 +-
 target/s390x/cpu.c | 2 -
 target/s390x/tcg/translate.c | 426 +-
 target/sh4/cpu.c | 3 +-
 target/sh4/op_helper.c | 2 +-
 target/sh4/translate.c | 128 +-
 target/sparc/cpu.c | 3 +-
 target/sparc/translate.c | 332 +-
 target/tricore/cpu.c | 10 +-
 target/tricore/translate.c | 224 +-
 target/xtensa/cpu.c | 2 +-
 target/xtensa/translate.c | 192 +-
 tcg/region.c | 41 +-
 tcg/tcg-op-gvec.c | 437 +-
 tcg/tcg-op-ldst.c | 22 +-
 tcg/tcg-op.c | 2 +-
 tcg/tcg.c | 26 +-
 tests/tcg/m68k/denormal.c | 53 +
 util/cpuinfo-aarch64.c | 7 +
 util/oslib-posix.c | 15 +-
 accel/tcg/ldst_atomicity.c.inc | 88 +-
 accel/tcg/ldst_common.c.inc | 225 +
 fpu/softfloat-parts.c.inc | 7 +-
 target/cris/translate_v10.c.inc | 28 +-
 target/i386/tcg/decode-new.c.inc | 4 +-
 target/i386/tcg/emit.c.inc | 262 +-
 target/loongarch/insn_trans/trans_atomic.c.inc | 4 +-
 target/loongarch/insn_trans/trans_branch.c.inc | 2 +-
 target/loongarch/insn_trans/trans_extra.c.inc | 10 +-
 target/loongarch/insn_trans/trans_farith.c.inc | 6 +-
 target/loongarch/insn_trans/trans_fcmp.c.inc | 8 +-
 target/loongarch/insn_trans/trans_fmemory.c.inc | 8 +-
 target/loongarch/insn_trans/trans_fmov.c.inc | 20 +-
 target/loongarch/insn_trans/trans_lsx.c.inc | 44 +-
 target/loongarch/insn_trans/trans_memory.c.inc | 8 +-
 target/loongarch/insn_trans/trans_privileged.c.inc | 52 +-
 target/mips/tcg/micromips_translate.c.inc | 12 +-
 target/mips/tcg/nanomips_translate.c.inc | 200 +-
 target/ppc/power8-pmu-regs.c.inc | 8 +-
 target/ppc/translate/branch-impl.c.inc | 2 +-
 target/ppc/translate/dfp-impl.c.inc | 22 +-
 target/ppc/translate/fixedpoint-impl.c.inc | 2 +-
 target/ppc/translate/fp-impl.c.inc | 50 +-
 target/ppc/translate/processor-ctrl-impl.c.inc | 8 +-
 target/ppc/translate/spe-impl.c.inc | 30 +-
 target/ppc/translate/storage-ctrl-impl.c.inc | 26 +-
 target/ppc/translate/vmx-impl.c.inc | 34 +-
 target/ppc/translate/vsx-impl.c.inc | 54 +-
 target/riscv/insn_trans/trans_privileged.c.inc | 8 +-
 target/riscv/insn_trans/trans_rvbf16.c.inc | 10 +-
 target/riscv/insn_trans/trans_rvd.c.inc | 48 +-
 target/riscv/insn_trans/trans_rvf.c.inc | 46 +-
 target/riscv/insn_trans/trans_rvh.c.inc | 8 +-
 target/riscv/insn_trans/trans_rvi.c.inc | 16 +-
 target/riscv/insn_trans/trans_rvm.c.inc | 16 +-
 target/riscv/insn_trans/trans_rvv.c.inc | 130 +-
 target/riscv/insn_trans/trans_rvvk.c.inc | 30 +-
 target/riscv/insn_trans/trans_rvzce.c.inc | 2 +-
 target/riscv/insn_trans/trans_rvzfa.c.inc | 38 +-
 target/riscv/insn_trans/trans_rvzfh.c.inc | 54 +-
 target/riscv/insn_trans/trans_rvzicbo.c.inc | 8 +-
 target/riscv/insn_trans/trans_svinval.c.inc | 6 +-
 target/riscv/insn_trans/trans_xthead.c.inc | 2 +-
 target/s390x/tcg/translate_vx.c.inc | 104 +-
 tcg/aarch64/tcg-target.c.inc | 61 +-
 tcg/arm/tcg-target.c.inc | 9 +-
 tcg/i386/tcg-target.c.inc | 7 +-
 tcg/loongarch64/tcg-insn-defs.c.inc | 6019 +++++++++++++++++++-
 tcg/loongarch64/tcg-target.c.inc | 628 +-
 tcg/mips/tcg-target.c.inc | 7 +-
 tcg/ppc/tcg-target.c.inc | 7 +-
 tcg/riscv/tcg-target.c.inc | 7 +-
 tcg/s390x/tcg-target.c.inc | 7 +-
 tcg/sparc64/tcg-target.c.inc | 7 +-
 tcg/tci/tcg-target.c.inc | 7 +-
 accel/meson.build | 4 +-
 accel/tcg/meson.build | 8 +-
 softmmu/meson.build | 4 -
 target/hexagon/README | 10 +-
 target/hexagon/gen_tcg_funcs.py | 16 +-
 tests/tcg/m68k/Makefile.target | 2 +-
 238 files changed, 12363 insertions(+), 5537 deletions(-)
 create mode 100644 accel/tcg/internal-common.h
 rename accel/tcg/{internal.h => internal-target.h} (89%)
 create mode 100644 tcg/loongarch64/tcg-target.opc.h
 rename accel/{accel-common.c => accel-target.c} (87%)
 rename softmmu/icount.c => accel/tcg/icount-common.c (99%)
 rename cpus-common.c => cpu-common.c (100%)
 rename cpu.c => cpu-target.c (97%)
 rename page-vary.c => page-vary-target.c (100%)

From: Nicholas Piggin <npiggin@gmail.com>

mttcg asserts that an execution ending with EXCP_HALTED must have
cpu->halted. However between the event or instruction that sets
cpu->halted and requests exit and the assertion here, an
asynchronous event could clear cpu->halted.

This leads to crashes running AIX on ppc/pseries because it uses
H_CEDE/H_PROD hcalls, where H_CEDE sets self->halted = 1 and
H_PROD sets other cpu->halted = 0 and kicks it.

H_PROD could be turned into an interrupt to wake, but several other
places in ppc, sparc, and semihosting follow what looks like a similar
pattern setting halted = 0 directly. So remove this assertion.

Reported-by: Ivan Warren <ivan@vmfacility.fr>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Message-Id: <20230829010658.8252-1-npiggin@gmail.com>
[rth: Keep the case label and adjust the comment.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tcg-accel-ops-mttcg.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -XXX,XX +XXX,XX @@ static void *mttcg_cpu_thread_fn(void *arg)
             break;
         case EXCP_HALTED:
             /*
-             * during start-up the vCPU is reset and the thread is
-             * kicked several times. If we don't ensure we go back
-             * to sleep in the halted state we won't cleanly
-             * start-up when the vCPU is enabled.
-             *
-             * cpu->halted should ensure we sleep in wait_io_event
+             * Usually cpu->halted is set, but may have already been
+             * reset by another thread by the time we arrive here.
              */
-            g_assert(cpu->halted);
             break;
         case EXCP_ATOMIC:
             qemu_mutex_unlock_iothread();
--
2.34.1
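
The window the commit message describes is easy to reproduce outside QEMU. Below is a minimal stand-alone sketch (plain pthreads and C11 atomics; all names are invented for the illustration, this is not QEMU code): one thread halts itself and later re-checks the flag, while a second thread clears it in between, so an assertion at the re-check point can fire even though nothing went wrong.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int halted;

    /* Models the vCPU thread: H_CEDE sets halted and exits the exec loop. */
    static void *vcpu_fn(void *arg)
    {
        atomic_store(&halted, 1);      /* halt self, return EXCP_HALTED */
        usleep(10000);                 /* the window before the old assert */
        /* g_assert(cpu->halted) sat here and could observe 0: */
        printf("halted at assert point: %d\n", atomic_load(&halted));
        return NULL;
    }

    /* Models H_PROD from another vCPU: clear halted and kick the target. */
    static void *prod_fn(void *arg)
    {
        atomic_store(&halted, 0);
        return NULL;
    }

    int main(void)
    {
        pthread_t vcpu, prod;
        pthread_create(&vcpu, NULL, vcpu_fn, NULL);
        pthread_create(&prod, NULL, prod_fn, NULL);
        pthread_join(vcpu, NULL);
        pthread_join(prod, NULL);
        return 0;
    }

Built with cc -pthread, this usually prints 0 at the assert point, which is exactly the false negative the patch removes.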

From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>

When memory region is ram, the lower TARGET_PAGE_BITS is not the
physical section number. Instead, its value is always 0.

Add comment and assert to make it clear.

Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Message-Id: <20230901060118.379-1-zhiwei_liu@linux.alibaba.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-defs.h | 12 ++++++------
 accel/tcg/cputlb.c      | 11 +++++++----
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -XXX,XX +XXX,XX @@
 typedef struct CPUTLBEntryFull {
     /*
      * @xlat_section contains:
-     *  - in the lower TARGET_PAGE_BITS, a physical section number
-     *  - with the lower TARGET_PAGE_BITS masked off, an offset which
-     *    must be added to the virtual address to obtain:
-     *     + the ram_addr_t of the target RAM (if the physical section
-     *       number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
-     *     + the offset within the target MemoryRegion (otherwise)
+     *  - For ram, an offset which must be added to the virtual address
+     *    to obtain the ram_addr_t of the target RAM
+     *  - For other memory regions,
+     *     + in the lower TARGET_PAGE_BITS, the physical section number
+     *     + with the TARGET_PAGE_BITS masked off, the offset within
+     *       the target MemoryRegion
      */
     hwaddr xlat_section;

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
     write_flags = read_flags;
     if (is_ram) {
         iotlb = memory_region_get_ram_addr(section->mr) + xlat;
+        assert(!(iotlb & ~TARGET_PAGE_MASK));
         /*
          * Computing is_clean is expensive; avoid all that unless
          * the page is actually writable.
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,

     /* refill the tlb */
     /*
-     * At this point iotlb contains a physical section number in the lower
-     * TARGET_PAGE_BITS, and either
-     *  + the ram_addr_t of the page base of the target RAM (RAM)
-     *  + the offset within section->mr of the page base (I/O, ROMD)
+     * When memory region is ram, iotlb contains a TARGET_PAGE_BITS
+     * aligned ram_addr_t of the page base of the target RAM.
+     * Otherwise, iotlb contains
+     *  - a physical section number in the lower TARGET_PAGE_BITS
+     *  - the offset within section->mr of the page base (I/O, ROMD) with the
+     *    TARGET_PAGE_BITS masked off.
      * We subtract addr_page (which is page aligned and thus won't
      * disturb the low bits) to give an offset which can be added to the
      * (non-page-aligned) vaddr of the eventual memory access to get
--
2.34.1
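
As a minimal sketch of the two encodings the revised comment documents (not QEMU code; TARGET_PAGE_BITS is fixed at 12 purely for the example), the value either is a page-aligned ram_addr_t offset, or carries the section number in its low bits:

    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS 12
    #define TARGET_PAGE_MASK (~(uint64_t)((1u << TARGET_PAGE_BITS) - 1))

    typedef uint64_t hwaddr;

    /* RAM: the low TARGET_PAGE_BITS are always 0 (the new assert checks
     * this), so the whole value is a page-aligned ram_addr_t offset. */
    static int is_page_aligned(hwaddr xlat_section)
    {
        return (xlat_section & ~TARGET_PAGE_MASK) == 0;
    }

    /* I/O: the physical section number rides in the low TARGET_PAGE_BITS,
     * and the in-region offset is the value with those bits masked off. */
    static unsigned io_section(hwaddr xlat_section)
    {
        return xlat_section & ~TARGET_PAGE_MASK;
    }

    static hwaddr io_offset(hwaddr xlat_section)
    {
        return xlat_section & TARGET_PAGE_MASK;
    }

    int main(void)
    {
        hwaddr ram = 0x40000000;       /* RAM encoding: aligned, low bits 0 */
        hwaddr io  = 0x80001000 | 7;   /* I/O encoding: offset | section 7 */

        printf("ram aligned: %d\n", is_page_aligned(ram));
        printf("io section %u at offset 0x%llx\n",
               io_section(io), (unsigned long long)io_offset(io));
        return 0;
    }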

From: Akihiko Odaki <akihiko.odaki@daynix.com>

IA-64 and PA-RISC host support is already removed with commit
b1cef6d02f ("Drop remaining bits of ia64 host support").

Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Message-Id: <20230810225922.21600-1-akihiko.odaki@daynix.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 util/oslib-posix.c | 15 +++------------
 1 file changed, 3 insertions(+), 12 deletions(-)

diff --git a/util/oslib-posix.c b/util/oslib-posix.c
index XXXXXXX..XXXXXXX 100644
--- a/util/oslib-posix.c
+++ b/util/oslib-posix.c
@@ -XXX,XX +XXX,XX @@ char *qemu_get_pid_name(pid_t pid)

 void *qemu_alloc_stack(size_t *sz)
 {
-    void *ptr, *guardpage;
+    void *ptr;
     int flags;
 #ifdef CONFIG_DEBUG_STACK_USAGE
     void *ptr2;
@@ -XXX,XX +XXX,XX @@ void *qemu_alloc_stack(size_t *sz)
         abort();
     }

-#if defined(HOST_IA64)
-    /* separate register stack */
-    guardpage = ptr + (((*sz - pagesz) / 2) & ~pagesz);
-#elif defined(HOST_HPPA)
-    /* stack grows up */
-    guardpage = ptr + *sz - pagesz;
-#else
-    /* stack grows down */
-    guardpage = ptr;
-#endif
-    if (mprotect(guardpage, pagesz, PROT_NONE) != 0) {
+    /* Stack grows down -- guard page at the bottom. */
+    if (mprotect(ptr, pagesz, PROT_NONE) != 0) {
         perror("failed to set up stack guard page");
         abort();
     }
--
2.34.1
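
The branch that survives is the classic guard-page idiom for a downward-growing stack. A self-contained sketch of the same technique (names invented for the example; this is not QEMU's allocator) looks like this:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void *alloc_stack_with_guard(size_t *sz)
    {
        size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
        size_t total = pagesz + *sz;   /* one guard page below the stack */
        void *ptr = mmap(NULL, total, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ptr == MAP_FAILED) {
            perror("mmap");
            abort();
        }
        /* Stack grows down -- revoke access to the lowest page so an
         * overflow faults cleanly instead of corrupting other memory. */
        if (mprotect(ptr, pagesz, PROT_NONE) != 0) {
            perror("mprotect");
            abort();
        }
        *sz = total;
        return ptr;
    }

    int main(void)
    {
        size_t sz = 8 * 1024 * 1024;
        void *stack = alloc_stack_with_guard(&sz);
        printf("stack at %p, %zu bytes (lowest page is the guard)\n",
               stack, sz);
        munmap(stack, sz);
        return 0;
    }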

From: Akihiko Odaki <akihiko.odaki@daynix.com>

PA-RISC host support is already removed with commit
b1cef6d02f ("Drop remaining bits of ia64 host support").

Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Message-Id: <20230810225922.21600-1-akihiko.odaki@daynix.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 softmmu/async-teardown.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/softmmu/async-teardown.c b/softmmu/async-teardown.c
index XXXXXXX..XXXXXXX 100644
--- a/softmmu/async-teardown.c
+++ b/softmmu/async-teardown.c
@@ -XXX,XX +XXX,XX @@ static void *new_stack_for_clone(void)

     /* Allocate a new stack and get a pointer to its top. */
     stack_ptr = qemu_alloc_stack(&stack_size);
-#if !defined(HOST_HPPA)
-    /* The top is at the end of the area, except on HPPA. */
     stack_ptr += stack_size;
-#endif

     return stack_ptr;
 }
--
2.34.1
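
With the HPPA special case gone, the pointer handed to clone() is unconditionally allocation base plus size, since clone() expects the top of a downward-growing child stack. A stand-alone sketch of that convention (not QEMU code; names invented for the example):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>

    static int child_fn(void *arg)
    {
        printf("child running on its own stack\n");
        return 0;
    }

    int main(void)
    {
        size_t stack_size = 1024 * 1024;
        char *stack = malloc(stack_size);
        if (!stack) {
            return 1;
        }
        /* clone() wants the *top*: stacks grow down on supported hosts. */
        char *stack_top = stack + stack_size;
        pid_t pid = clone(child_fn, stack_top, SIGCHLD, NULL);
        if (pid < 0) {
            perror("clone");
            return 1;
        }
        waitpid(pid, NULL, 0);
        free(stack);
        return 0;
    }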

From: Jiajie Chen <c@jia.je>

Add opcodes and encoder functions for LSX.

Generated from
https://github.com/jiegec/loongarch-opcodes/tree/qemu-lsx.

Signed-off-by: Jiajie Chen <c@jia.je>
Acked-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-2-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-insn-defs.c.inc | 6019 ++++++++++++++++++++++++++-
 1 file changed, 6018 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-insn-defs.c.inc b/tcg/loongarch64/tcg-insn-defs.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-insn-defs.c.inc
+++ b/tcg/loongarch64/tcg-insn-defs.c.inc
@@ -XXX,XX +XXX,XX @@
  *
  * This file is auto-generated by genqemutcgdefs from
  * https://github.com/loongson-community/loongarch-opcodes,
- * from commit 25ca7effe9d88101c1cf96c4005423643386d81f.
+ * from commit 8027da9a8157a8b47fc48ff1def292e09c5668bd.
  * DO NOT EDIT.
  */

@@ -XXX,XX +XXX,XX @@ typedef enum {
     OPC_ANDI = 0x03400000,
     OPC_ORI = 0x03800000,
     OPC_XORI = 0x03c00000,
+    OPC_VFMADD_S = 0x09100000,
+    OPC_VFMADD_D = 0x09200000,
+    OPC_VFMSUB_S = 0x09500000,
+    OPC_VFMSUB_D = 0x09600000,
+    OPC_VFNMADD_S = 0x09900000,
+    OPC_VFNMADD_D = 0x09a00000,
+    OPC_VFNMSUB_S = 0x09d00000,
+    OPC_VFNMSUB_D = 0x09e00000,
+    OPC_VFCMP_CAF_S = 0x0c500000,
+    OPC_VFCMP_SAF_S = 0x0c508000,
+    OPC_VFCMP_CLT_S = 0x0c510000,
+    OPC_VFCMP_SLT_S = 0x0c518000,
+    OPC_VFCMP_CEQ_S = 0x0c520000,
+    OPC_VFCMP_SEQ_S = 0x0c528000,
+    OPC_VFCMP_CLE_S = 0x0c530000,
+    OPC_VFCMP_SLE_S = 0x0c538000,
+    OPC_VFCMP_CUN_S = 0x0c540000,
+    OPC_VFCMP_SUN_S = 0x0c548000,
+    OPC_VFCMP_CULT_S = 0x0c550000,
+    OPC_VFCMP_SULT_S = 0x0c558000,
+    OPC_VFCMP_CUEQ_S = 0x0c560000,
+    OPC_VFCMP_SUEQ_S = 0x0c568000,
+    OPC_VFCMP_CULE_S = 0x0c570000,
+    OPC_VFCMP_SULE_S = 0x0c578000,
+    OPC_VFCMP_CNE_S = 0x0c580000,
+    OPC_VFCMP_SNE_S = 0x0c588000,
+    OPC_VFCMP_COR_S = 0x0c5a0000,
+    OPC_VFCMP_SOR_S = 0x0c5a8000,
+    OPC_VFCMP_CUNE_S = 0x0c5c0000,
+    OPC_VFCMP_SUNE_S = 0x0c5c8000,
+    OPC_VFCMP_CAF_D = 0x0c600000,
+    OPC_VFCMP_SAF_D = 0x0c608000,
+    OPC_VFCMP_CLT_D = 0x0c610000,
+    OPC_VFCMP_SLT_D = 0x0c618000,
+    OPC_VFCMP_CEQ_D = 0x0c620000,
+    OPC_VFCMP_SEQ_D = 0x0c628000,
+    OPC_VFCMP_CLE_D = 0x0c630000,
+    OPC_VFCMP_SLE_D = 0x0c638000,
+    OPC_VFCMP_CUN_D = 0x0c640000,
+    OPC_VFCMP_SUN_D = 0x0c648000,
+    OPC_VFCMP_CULT_D = 0x0c650000,
+    OPC_VFCMP_SULT_D = 0x0c658000,
+    OPC_VFCMP_CUEQ_D = 0x0c660000,
+    OPC_VFCMP_SUEQ_D = 0x0c668000,
+    OPC_VFCMP_CULE_D = 0x0c670000,
+    OPC_VFCMP_SULE_D = 0x0c678000,
+    OPC_VFCMP_CNE_D = 0x0c680000,
+    OPC_VFCMP_SNE_D = 0x0c688000,
+    OPC_VFCMP_COR_D = 0x0c6a0000,
+    OPC_VFCMP_SOR_D = 0x0c6a8000,
+    OPC_VFCMP_CUNE_D = 0x0c6c0000,
+    OPC_VFCMP_SUNE_D = 0x0c6c8000,
+    OPC_VBITSEL_V = 0x0d100000,
+    OPC_VSHUF_B = 0x0d500000,
     OPC_ADDU16I_D = 0x10000000,
     OPC_LU12I_W = 0x14000000,
     OPC_CU32I_D = 0x16000000,
@@ -XXX,XX +XXX,XX @@ typedef enum {
     OPC_LD_BU = 0x2a000000,
     OPC_LD_HU = 0x2a400000,
     OPC_LD_WU = 0x2a800000,
+    OPC_VLD = 0x2c000000,
+    OPC_VST = 0x2c400000,
+    OPC_VLDREPL_D = 0x30100000,
+    OPC_VLDREPL_W = 0x30200000,
+    OPC_VLDREPL_H = 0x30400000,
+    OPC_VLDREPL_B = 0x30800000,
+    OPC_VSTELM_D = 0x31100000,
+    OPC_VSTELM_W = 0x31200000,
+    OPC_VSTELM_H = 0x31400000,
+    OPC_VSTELM_B = 0x31800000,
     OPC_LDX_B = 0x38000000,
     OPC_LDX_H = 0x38040000,
     OPC_LDX_W = 0x38080000,
@@ -XXX,XX +XXX,XX @@ typedef enum {
     OPC_LDX_BU = 0x38200000,
     OPC_LDX_HU = 0x38240000,
     OPC_LDX_WU = 0x38280000,
+    OPC_VLDX = 0x38400000,
+    OPC_VSTX = 0x38440000,
     OPC_DBAR = 0x38720000,
     OPC_JIRL = 0x4c000000,
     OPC_B = 0x50000000,
@@ -XXX,XX +XXX,XX @@ typedef enum {
     OPC_BLE = 0x64000000,
     OPC_BGTU = 0x68000000,
     OPC_BLEU = 0x6c000000,
+    OPC_VSEQ_B = 0x70000000,
+    OPC_VSEQ_H = 0x70008000,
+    OPC_VSEQ_W = 0x70010000,
+    OPC_VSEQ_D = 0x70018000,
+    OPC_VSLE_B = 0x70020000,
+    OPC_VSLE_H = 0x70028000,
+    OPC_VSLE_W = 0x70030000,
+    OPC_VSLE_D = 0x70038000,
+    OPC_VSLE_BU = 0x70040000,
+    OPC_VSLE_HU = 0x70048000,
+    OPC_VSLE_WU = 0x70050000,
+    OPC_VSLE_DU = 0x70058000,
+    OPC_VSLT_B = 0x70060000,
+    OPC_VSLT_H = 0x70068000,
+    OPC_VSLT_W = 0x70070000,
+    OPC_VSLT_D = 0x70078000,
+    OPC_VSLT_BU = 0x70080000,
+    OPC_VSLT_HU = 0x70088000,
+    OPC_VSLT_WU = 0x70090000,
+    OPC_VSLT_DU = 0x70098000,
+    OPC_VADD_B = 0x700a0000,
+    OPC_VADD_H = 0x700a8000,
+    OPC_VADD_W = 0x700b0000,
+    OPC_VADD_D = 0x700b8000,
+    OPC_VSUB_B = 0x700c0000,
+    OPC_VSUB_H = 0x700c8000,
+    OPC_VSUB_W = 0x700d0000,
+    OPC_VSUB_D = 0x700d8000,
+    OPC_VADDWEV_H_B = 0x701e0000,
+    OPC_VADDWEV_W_H = 0x701e8000,
+    OPC_VADDWEV_D_W = 0x701f0000,
+    OPC_VADDWEV_Q_D = 0x701f8000,
+    OPC_VSUBWEV_H_B = 0x70200000,
+    OPC_VSUBWEV_W_H = 0x70208000,
+    OPC_VSUBWEV_D_W = 0x70210000,
+    OPC_VSUBWEV_Q_D = 0x70218000,
+    OPC_VADDWOD_H_B = 0x70220000,
+    OPC_VADDWOD_W_H = 0x70228000,
+    OPC_VADDWOD_D_W = 0x70230000,
+    OPC_VADDWOD_Q_D = 0x70238000,
+    OPC_VSUBWOD_H_B = 0x70240000,
+    OPC_VSUBWOD_W_H = 0x70248000,
+    OPC_VSUBWOD_D_W = 0x70250000,
+    OPC_VSUBWOD_Q_D = 0x70258000,
+    OPC_VADDWEV_H_BU = 0x702e0000,
+    OPC_VADDWEV_W_HU = 0x702e8000,
+    OPC_VADDWEV_D_WU = 0x702f0000,
+    OPC_VADDWEV_Q_DU = 0x702f8000,
+    OPC_VSUBWEV_H_BU = 0x70300000,
+    OPC_VSUBWEV_W_HU = 0x70308000,
+    OPC_VSUBWEV_D_WU = 0x70310000,
+    OPC_VSUBWEV_Q_DU = 0x70318000,
+    OPC_VADDWOD_H_BU = 0x70320000,
+    OPC_VADDWOD_W_HU = 0x70328000,
+    OPC_VADDWOD_D_WU = 0x70330000,
+    OPC_VADDWOD_Q_DU = 0x70338000,
+    OPC_VSUBWOD_H_BU = 0x70340000,
+    OPC_VSUBWOD_W_HU = 0x70348000,
+    OPC_VSUBWOD_D_WU = 0x70350000,
+    OPC_VSUBWOD_Q_DU = 0x70358000,
+    OPC_VADDWEV_H_BU_B = 0x703e0000,
+    OPC_VADDWEV_W_HU_H = 0x703e8000,
+    OPC_VADDWEV_D_WU_W = 0x703f0000,
+    OPC_VADDWEV_Q_DU_D = 0x703f8000,
+    OPC_VADDWOD_H_BU_B = 0x70400000,
+    OPC_VADDWOD_W_HU_H = 0x70408000,
+    OPC_VADDWOD_D_WU_W = 0x70410000,
+    OPC_VADDWOD_Q_DU_D = 0x70418000,
+    OPC_VSADD_B = 0x70460000,
+    OPC_VSADD_H = 0x70468000,
+    OPC_VSADD_W = 0x70470000,
+    OPC_VSADD_D = 0x70478000,
+    OPC_VSSUB_B = 0x70480000,
+    OPC_VSSUB_H = 0x70488000,
+    OPC_VSSUB_W = 0x70490000,
+    OPC_VSSUB_D = 0x70498000,
+    OPC_VSADD_BU = 0x704a0000,
+    OPC_VSADD_HU = 0x704a8000,
+    OPC_VSADD_WU = 0x704b0000,
+    OPC_VSADD_DU = 0x704b8000,
+    OPC_VSSUB_BU = 0x704c0000,
+    OPC_VSSUB_HU = 0x704c8000,
+    OPC_VSSUB_WU = 0x704d0000,
+    OPC_VSSUB_DU = 0x704d8000,
+    OPC_VHADDW_H_B = 0x70540000,
+    OPC_VHADDW_W_H = 0x70548000,
+    OPC_VHADDW_D_W = 0x70550000,
+    OPC_VHADDW_Q_D = 0x70558000,
+    OPC_VHSUBW_H_B = 0x70560000,
+    OPC_VHSUBW_W_H = 0x70568000,
+    OPC_VHSUBW_D_W = 0x70570000,
+    OPC_VHSUBW_Q_D = 0x70578000,
+    OPC_VHADDW_HU_BU = 0x70580000,
+    OPC_VHADDW_WU_HU = 0x70588000,
+    OPC_VHADDW_DU_WU = 0x70590000,
+    OPC_VHADDW_QU_DU = 0x70598000,
+    OPC_VHSUBW_HU_BU = 0x705a0000,
+    OPC_VHSUBW_WU_HU = 0x705a8000,
+    OPC_VHSUBW_DU_WU = 0x705b0000,
+    OPC_VHSUBW_QU_DU = 0x705b8000,
+    OPC_VADDA_B = 0x705c0000,
+    OPC_VADDA_H = 0x705c8000,
+    OPC_VADDA_W = 0x705d0000,
+    OPC_VADDA_D = 0x705d8000,
+    OPC_VABSD_B = 0x70600000,
+    OPC_VABSD_H = 0x70608000,
+    OPC_VABSD_W = 0x70610000,
+    OPC_VABSD_D = 0x70618000,
+    OPC_VABSD_BU = 0x70620000,
+    OPC_VABSD_HU = 0x70628000,
+    OPC_VABSD_WU = 0x70630000,
+    OPC_VABSD_DU = 0x70638000,
+    OPC_VAVG_B = 0x70640000,
+    OPC_VAVG_H = 0x70648000,
+    OPC_VAVG_W = 0x70650000,
+    OPC_VAVG_D = 0x70658000,
+    OPC_VAVG_BU = 0x70660000,
+    OPC_VAVG_HU = 0x70668000,
+    OPC_VAVG_WU = 0x70670000,
+    OPC_VAVG_DU = 0x70678000,
+    OPC_VAVGR_B = 0x70680000,
+    OPC_VAVGR_H = 0x70688000,
+    OPC_VAVGR_W = 0x70690000,
+    OPC_VAVGR_D = 0x70698000,
+    OPC_VAVGR_BU = 0x706a0000,
+    OPC_VAVGR_HU = 0x706a8000,
+    OPC_VAVGR_WU = 0x706b0000,
+    OPC_VAVGR_DU = 0x706b8000,
+    OPC_VMAX_B = 0x70700000,
+    OPC_VMAX_H = 0x70708000,
+    OPC_VMAX_W = 0x70710000,
+    OPC_VMAX_D = 0x70718000,
+    OPC_VMIN_B = 0x70720000,
+    OPC_VMIN_H = 0x70728000,
+    OPC_VMIN_W = 0x70730000,
+    OPC_VMIN_D = 0x70738000,
+    OPC_VMAX_BU = 0x70740000,
+    OPC_VMAX_HU = 0x70748000,
+    OPC_VMAX_WU = 0x70750000,
+    OPC_VMAX_DU = 0x70758000,
+    OPC_VMIN_BU = 0x70760000,
+    OPC_VMIN_HU = 0x70768000,
+    OPC_VMIN_WU = 0x70770000,
+    OPC_VMIN_DU = 0x70778000,
+    OPC_VMUL_B = 0x70840000,
+    OPC_VMUL_H = 0x70848000,
+    OPC_VMUL_W = 0x70850000,
+    OPC_VMUL_D = 0x70858000,
+    OPC_VMUH_B = 0x70860000,
+    OPC_VMUH_H = 0x70868000,
+    OPC_VMUH_W = 0x70870000,
+    OPC_VMUH_D = 0x70878000,
+    OPC_VMUH_BU = 0x70880000,
+    OPC_VMUH_HU = 0x70888000,
+    OPC_VMUH_WU = 0x70890000,
+    OPC_VMUH_DU = 0x70898000,
+    OPC_VMULWEV_H_B = 0x70900000,
+    OPC_VMULWEV_W_H = 0x70908000,
+    OPC_VMULWEV_D_W = 0x70910000,
+    OPC_VMULWEV_Q_D = 0x70918000,
+    OPC_VMULWOD_H_B = 0x70920000,
+    OPC_VMULWOD_W_H = 0x70928000,
+    OPC_VMULWOD_D_W = 0x70930000,
+    OPC_VMULWOD_Q_D = 0x70938000,
+    OPC_VMULWEV_H_BU = 0x70980000,
+    OPC_VMULWEV_W_HU = 0x70988000,
+    OPC_VMULWEV_D_WU = 0x70990000,
+    OPC_VMULWEV_Q_DU = 0x70998000,
+    OPC_VMULWOD_H_BU = 0x709a0000,
+    OPC_VMULWOD_W_HU = 0x709a8000,
+    OPC_VMULWOD_D_WU = 0x709b0000,
+    OPC_VMULWOD_Q_DU = 0x709b8000,
+    OPC_VMULWEV_H_BU_B = 0x70a00000,
+    OPC_VMULWEV_W_HU_H = 0x70a08000,
+    OPC_VMULWEV_D_WU_W = 0x70a10000,
+    OPC_VMULWEV_Q_DU_D = 0x70a18000,
+    OPC_VMULWOD_H_BU_B = 0x70a20000,
+    OPC_VMULWOD_W_HU_H = 0x70a28000,
+    OPC_VMULWOD_D_WU_W = 0x70a30000,
+    OPC_VMULWOD_Q_DU_D = 0x70a38000,
+    OPC_VMADD_B = 0x70a80000,
+    OPC_VMADD_H = 0x70a88000,
+    OPC_VMADD_W = 0x70a90000,
+    OPC_VMADD_D = 0x70a98000,
+    OPC_VMSUB_B = 0x70aa0000,
+    OPC_VMSUB_H = 0x70aa8000,
+    OPC_VMSUB_W = 0x70ab0000,
+    OPC_VMSUB_D = 0x70ab8000,
+    OPC_VMADDWEV_H_B = 0x70ac0000,
+    OPC_VMADDWEV_W_H = 0x70ac8000,
+    OPC_VMADDWEV_D_W = 0x70ad0000,
+    OPC_VMADDWEV_Q_D = 0x70ad8000,
+    OPC_VMADDWOD_H_B = 0x70ae0000,
+    OPC_VMADDWOD_W_H = 0x70ae8000,
+    OPC_VMADDWOD_D_W = 0x70af0000,
+    OPC_VMADDWOD_Q_D = 0x70af8000,
+    OPC_VMADDWEV_H_BU = 0x70b40000,
+    OPC_VMADDWEV_W_HU = 0x70b48000,
+    OPC_VMADDWEV_D_WU = 0x70b50000,
+    OPC_VMADDWEV_Q_DU = 0x70b58000,
+    OPC_VMADDWOD_H_BU = 0x70b60000,
+    OPC_VMADDWOD_W_HU = 0x70b68000,
+    OPC_VMADDWOD_D_WU = 0x70b70000,
+    OPC_VMADDWOD_Q_DU = 0x70b78000,
+    OPC_VMADDWEV_H_BU_B = 0x70bc0000,
+    OPC_VMADDWEV_W_HU_H = 0x70bc8000,
+    OPC_VMADDWEV_D_WU_W = 0x70bd0000,
+    OPC_VMADDWEV_Q_DU_D = 0x70bd8000,
+    OPC_VMADDWOD_H_BU_B = 0x70be0000,
+    OPC_VMADDWOD_W_HU_H = 0x70be8000,
+    OPC_VMADDWOD_D_WU_W = 0x70bf0000,
+    OPC_VMADDWOD_Q_DU_D = 0x70bf8000,
+    OPC_VDIV_B = 0x70e00000,
+    OPC_VDIV_H = 0x70e08000,
+    OPC_VDIV_W = 0x70e10000,
+    OPC_VDIV_D = 0x70e18000,
+    OPC_VMOD_B = 0x70e20000,
+    OPC_VMOD_H = 0x70e28000,
+    OPC_VMOD_W = 0x70e30000,
+    OPC_VMOD_D = 0x70e38000,
+    OPC_VDIV_BU = 0x70e40000,
+    OPC_VDIV_HU = 0x70e48000,
+    OPC_VDIV_WU = 0x70e50000,
+    OPC_VDIV_DU = 0x70e58000,
+    OPC_VMOD_BU = 0x70e60000,
+    OPC_VMOD_HU = 0x70e68000,
+    OPC_VMOD_WU = 0x70e70000,
+    OPC_VMOD_DU = 0x70e78000,
+    OPC_VSLL_B = 0x70e80000,
+    OPC_VSLL_H = 0x70e88000,
+    OPC_VSLL_W = 0x70e90000,
+    OPC_VSLL_D = 0x70e98000,
+    OPC_VSRL_B = 0x70ea0000,
+    OPC_VSRL_H = 0x70ea8000,
+    OPC_VSRL_W = 0x70eb0000,
+    OPC_VSRL_D = 0x70eb8000,
+    OPC_VSRA_B = 0x70ec0000,
+    OPC_VSRA_H = 0x70ec8000,
+    OPC_VSRA_W = 0x70ed0000,
+    OPC_VSRA_D = 0x70ed8000,
+    OPC_VROTR_B = 0x70ee0000,
+    OPC_VROTR_H = 0x70ee8000,
+    OPC_VROTR_W = 0x70ef0000,
+    OPC_VROTR_D = 0x70ef8000,
+    OPC_VSRLR_B = 0x70f00000,
+    OPC_VSRLR_H = 0x70f08000,
+    OPC_VSRLR_W = 0x70f10000,
+    OPC_VSRLR_D = 0x70f18000,
+    OPC_VSRAR_B = 0x70f20000,
+    OPC_VSRAR_H = 0x70f28000,
+    OPC_VSRAR_W = 0x70f30000,
+    OPC_VSRAR_D = 0x70f38000,
+    OPC_VSRLN_B_H = 0x70f48000,
+    OPC_VSRLN_H_W = 0x70f50000,
+    OPC_VSRLN_W_D = 0x70f58000,
+    OPC_VSRAN_B_H = 0x70f68000,
+    OPC_VSRAN_H_W = 0x70f70000,
+    OPC_VSRAN_W_D = 0x70f78000,
+    OPC_VSRLRN_B_H = 0x70f88000,
+    OPC_VSRLRN_H_W = 0x70f90000,
+    OPC_VSRLRN_W_D = 0x70f98000,
+    OPC_VSRARN_B_H = 0x70fa8000,
+    OPC_VSRARN_H_W = 0x70fb0000,
+    OPC_VSRARN_W_D = 0x70fb8000,
+    OPC_VSSRLN_B_H = 0x70fc8000,
+    OPC_VSSRLN_H_W = 0x70fd0000,
+    OPC_VSSRLN_W_D = 0x70fd8000,
+    OPC_VSSRAN_B_H = 0x70fe8000,
+    OPC_VSSRAN_H_W = 0x70ff0000,
+    OPC_VSSRAN_W_D = 0x70ff8000,
+    OPC_VSSRLRN_B_H = 0x71008000,
+    OPC_VSSRLRN_H_W = 0x71010000,
+    OPC_VSSRLRN_W_D = 0x71018000,
+    OPC_VSSRARN_B_H = 0x71028000,
+    OPC_VSSRARN_H_W = 0x71030000,
+    OPC_VSSRARN_W_D = 0x71038000,
+    OPC_VSSRLN_BU_H = 0x71048000,
+    OPC_VSSRLN_HU_W = 0x71050000,
+    OPC_VSSRLN_WU_D = 0x71058000,
+    OPC_VSSRAN_BU_H = 0x71068000,
+    OPC_VSSRAN_HU_W = 0x71070000,
+    OPC_VSSRAN_WU_D = 0x71078000,
+    OPC_VSSRLRN_BU_H = 0x71088000,
+    OPC_VSSRLRN_HU_W = 0x71090000,
+    OPC_VSSRLRN_WU_D = 0x71098000,
+    OPC_VSSRARN_BU_H = 0x710a8000,
+    OPC_VSSRARN_HU_W = 0x710b0000,
+    OPC_VSSRARN_WU_D = 0x710b8000,
+    OPC_VBITCLR_B = 0x710c0000,
+    OPC_VBITCLR_H = 0x710c8000,
+    OPC_VBITCLR_W = 0x710d0000,
+    OPC_VBITCLR_D = 0x710d8000,
+    OPC_VBITSET_B = 0x710e0000,
+    OPC_VBITSET_H = 0x710e8000,
+    OPC_VBITSET_W = 0x710f0000,
+    OPC_VBITSET_D = 0x710f8000,
+    OPC_VBITREV_B = 0x71100000,
+    OPC_VBITREV_H = 0x71108000,
+    OPC_VBITREV_W = 0x71110000,
+    OPC_VBITREV_D = 0x71118000,
+    OPC_VPACKEV_B = 0x71160000,
+    OPC_VPACKEV_H = 0x71168000,
+    OPC_VPACKEV_W = 0x71170000,
+    OPC_VPACKEV_D = 0x71178000,
+    OPC_VPACKOD_B = 0x71180000,
+    OPC_VPACKOD_H = 0x71188000,
+    OPC_VPACKOD_W = 0x71190000,
+    OPC_VPACKOD_D = 0x71198000,
+    OPC_VILVL_B = 0x711a0000,
+    OPC_VILVL_H = 0x711a8000,
+    OPC_VILVL_W = 0x711b0000,
+    OPC_VILVL_D = 0x711b8000,
+    OPC_VILVH_B = 0x711c0000,
+    OPC_VILVH_H = 0x711c8000,
+    OPC_VILVH_W = 0x711d0000,
+    OPC_VILVH_D = 0x711d8000,
+    OPC_VPICKEV_B = 0x711e0000,
+    OPC_VPICKEV_H = 0x711e8000,
+    OPC_VPICKEV_W = 0x711f0000,
+    OPC_VPICKEV_D = 0x711f8000,
+    OPC_VPICKOD_B = 0x71200000,
+    OPC_VPICKOD_H = 0x71208000,
+    OPC_VPICKOD_W = 0x71210000,
+    OPC_VPICKOD_D = 0x71218000,
+    OPC_VREPLVE_B = 0x71220000,
+    OPC_VREPLVE_H = 0x71228000,
+    OPC_VREPLVE_W = 0x71230000,
+    OPC_VREPLVE_D = 0x71238000,
+    OPC_VAND_V = 0x71260000,
+    OPC_VOR_V = 0x71268000,
+    OPC_VXOR_V = 0x71270000,
+    OPC_VNOR_V = 0x71278000,
+    OPC_VANDN_V = 0x71280000,
+    OPC_VORN_V = 0x71288000,
+    OPC_VFRSTP_B = 0x712b0000,
+    OPC_VFRSTP_H = 0x712b8000,
+    OPC_VADD_Q = 0x712d0000,
+    OPC_VSUB_Q = 0x712d8000,
+    OPC_VSIGNCOV_B = 0x712e0000,
+    OPC_VSIGNCOV_H = 0x712e8000,
+    OPC_VSIGNCOV_W = 0x712f0000,
+    OPC_VSIGNCOV_D = 0x712f8000,
+    OPC_VFADD_S = 0x71308000,
+    OPC_VFADD_D = 0x71310000,
+    OPC_VFSUB_S = 0x71328000,
+    OPC_VFSUB_D = 0x71330000,
+    OPC_VFMUL_S = 0x71388000,
+    OPC_VFMUL_D = 0x71390000,
+    OPC_VFDIV_S = 0x713a8000,
+    OPC_VFDIV_D = 0x713b0000,
+    OPC_VFMAX_S = 0x713c8000,
+    OPC_VFMAX_D = 0x713d0000,
+    OPC_VFMIN_S = 0x713e8000,
+    OPC_VFMIN_D = 0x713f0000,
+    OPC_VFMAXA_S = 0x71408000,
+    OPC_VFMAXA_D = 0x71410000,
+    OPC_VFMINA_S = 0x71428000,
+    OPC_VFMINA_D = 0x71430000,
+    OPC_VFCVT_H_S = 0x71460000,
+    OPC_VFCVT_S_D = 0x71468000,
+    OPC_VFFINT_S_L = 0x71480000,
+    OPC_VFTINT_W_D = 0x71498000,
+    OPC_VFTINTRM_W_D = 0x714a0000,
+    OPC_VFTINTRP_W_D = 0x714a8000,
+    OPC_VFTINTRZ_W_D = 0x714b0000,
+    OPC_VFTINTRNE_W_D = 0x714b8000,
+    OPC_VSHUF_H = 0x717a8000,
+    OPC_VSHUF_W = 0x717b0000,
+    OPC_VSHUF_D = 0x717b8000,
+    OPC_VSEQI_B = 0x72800000,
+    OPC_VSEQI_H = 0x72808000,
+    OPC_VSEQI_W = 0x72810000,
+    OPC_VSEQI_D = 0x72818000,
+    OPC_VSLEI_B = 0x72820000,
+    OPC_VSLEI_H = 0x72828000,
+    OPC_VSLEI_W = 0x72830000,
+    OPC_VSLEI_D = 0x72838000,
+    OPC_VSLEI_BU = 0x72840000,
+    OPC_VSLEI_HU = 0x72848000,
+    OPC_VSLEI_WU = 0x72850000,
+    OPC_VSLEI_DU = 0x72858000,
+    OPC_VSLTI_B = 0x72860000,
+    OPC_VSLTI_H = 0x72868000,
+    OPC_VSLTI_W = 0x72870000,
+    OPC_VSLTI_D = 0x72878000,
+    OPC_VSLTI_BU = 0x72880000,
+    OPC_VSLTI_HU = 0x72888000,
+    OPC_VSLTI_WU = 0x72890000,
+    OPC_VSLTI_DU = 0x72898000,
+    OPC_VADDI_BU = 0x728a0000,
+    OPC_VADDI_HU = 0x728a8000,
+    OPC_VADDI_WU = 0x728b0000,
+    OPC_VADDI_DU = 0x728b8000,
+    OPC_VSUBI_BU = 0x728c0000,
+    OPC_VSUBI_HU = 0x728c8000,
+    OPC_VSUBI_WU = 0x728d0000,
+    OPC_VSUBI_DU = 0x728d8000,
+    OPC_VBSLL_V = 0x728e0000,
+    OPC_VBSRL_V = 0x728e8000,
+    OPC_VMAXI_B = 0x72900000,
+    OPC_VMAXI_H = 0x72908000,
+    OPC_VMAXI_W = 0x72910000,
+    OPC_VMAXI_D = 0x72918000,
+    OPC_VMINI_B = 0x72920000,
+    OPC_VMINI_H = 0x72928000,
+    OPC_VMINI_W = 0x72930000,
+    OPC_VMINI_D = 0x72938000,
+    OPC_VMAXI_BU = 0x72940000,
+    OPC_VMAXI_HU = 0x72948000,
+    OPC_VMAXI_WU = 0x72950000,
+    OPC_VMAXI_DU = 0x72958000,
+    OPC_VMINI_BU = 0x72960000,
+    OPC_VMINI_HU = 0x72968000,
+    OPC_VMINI_WU = 0x72970000,
+    OPC_VMINI_DU = 0x72978000,
+    OPC_VFRSTPI_B = 0x729a0000,
+    OPC_VFRSTPI_H = 0x729a8000,
+    OPC_VCLO_B = 0x729c0000,
+    OPC_VCLO_H = 0x729c0400,
+    OPC_VCLO_W = 0x729c0800,
+    OPC_VCLO_D = 0x729c0c00,
+    OPC_VCLZ_B = 0x729c1000,
+    OPC_VCLZ_H = 0x729c1400,
+    OPC_VCLZ_W = 0x729c1800,
+    OPC_VCLZ_D = 0x729c1c00,
+    OPC_VPCNT_B = 0x729c2000,
+    OPC_VPCNT_H = 0x729c2400,
+    OPC_VPCNT_W = 0x729c2800,
+    OPC_VPCNT_D = 0x729c2c00,
+    OPC_VNEG_B = 0x729c3000,
+    OPC_VNEG_H = 0x729c3400,
+    OPC_VNEG_W = 0x729c3800,
+    OPC_VNEG_D = 0x729c3c00,
+    OPC_VMSKLTZ_B = 0x729c4000,
+    OPC_VMSKLTZ_H = 0x729c4400,
+    OPC_VMSKLTZ_W = 0x729c4800,
+    OPC_VMSKLTZ_D = 0x729c4c00,
+    OPC_VMSKGEZ_B = 0x729c5000,
+    OPC_VMSKNZ_B = 0x729c6000,
+    OPC_VSETEQZ_V = 0x729c9800,
+    OPC_VSETNEZ_V = 0x729c9c00,
+    OPC_VSETANYEQZ_B = 0x729ca000,
+    OPC_VSETANYEQZ_H = 0x729ca400,
+    OPC_VSETANYEQZ_W = 0x729ca800,
+    OPC_VSETANYEQZ_D = 0x729cac00,
+    OPC_VSETALLNEZ_B = 0x729cb000,
+    OPC_VSETALLNEZ_H = 0x729cb400,
+    OPC_VSETALLNEZ_W = 0x729cb800,
+    OPC_VSETALLNEZ_D = 0x729cbc00,
+    OPC_VFLOGB_S = 0x729cc400,
+    OPC_VFLOGB_D = 0x729cc800,
+    OPC_VFCLASS_S = 0x729cd400,
+    OPC_VFCLASS_D = 0x729cd800,
+    OPC_VFSQRT_S = 0x729ce400,
+    OPC_VFSQRT_D = 0x729ce800,
+    OPC_VFRECIP_S = 0x729cf400,
+    OPC_VFRECIP_D = 0x729cf800,
+    OPC_VFRSQRT_S = 0x729d0400,
+    OPC_VFRSQRT_D = 0x729d0800,
+    OPC_VFRINT_S = 0x729d3400,
+    OPC_VFRINT_D = 0x729d3800,
+    OPC_VFRINTRM_S = 0x729d4400,
+    OPC_VFRINTRM_D = 0x729d4800,
+    OPC_VFRINTRP_S = 0x729d5400,
+    OPC_VFRINTRP_D = 0x729d5800,
+    OPC_VFRINTRZ_S = 0x729d6400,
+    OPC_VFRINTRZ_D = 0x729d6800,
+    OPC_VFRINTRNE_S = 0x729d7400,
+    OPC_VFRINTRNE_D = 0x729d7800,
+    OPC_VFCVTL_S_H = 0x729de800,
+    OPC_VFCVTH_S_H = 0x729dec00,
+    OPC_VFCVTL_D_S = 0x729df000,
+    OPC_VFCVTH_D_S = 0x729df400,
+    OPC_VFFINT_S_W = 0x729e0000,
+    OPC_VFFINT_S_WU = 0x729e0400,
+    OPC_VFFINT_D_L = 0x729e0800,
+    OPC_VFFINT_D_LU = 0x729e0c00,
+    OPC_VFFINTL_D_W = 0x729e1000,
+    OPC_VFFINTH_D_W = 0x729e1400,
+    OPC_VFTINT_W_S = 0x729e3000,
+    OPC_VFTINT_L_D = 0x729e3400,
+    OPC_VFTINTRM_W_S = 0x729e3800,
+    OPC_VFTINTRM_L_D = 0x729e3c00,
+    OPC_VFTINTRP_W_S = 0x729e4000,
+    OPC_VFTINTRP_L_D = 0x729e4400,
+    OPC_VFTINTRZ_W_S = 0x729e4800,
+    OPC_VFTINTRZ_L_D = 0x729e4c00,
+    OPC_VFTINTRNE_W_S = 0x729e5000,
+    OPC_VFTINTRNE_L_D = 0x729e5400,
+    OPC_VFTINT_WU_S = 0x729e5800,
+    OPC_VFTINT_LU_D = 0x729e5c00,
+    OPC_VFTINTRZ_WU_S = 0x729e7000,
+    OPC_VFTINTRZ_LU_D = 0x729e7400,
+    OPC_VFTINTL_L_S = 0x729e8000,
+    OPC_VFTINTH_L_S = 0x729e8400,
+    OPC_VFTINTRML_L_S = 0x729e8800,
+    OPC_VFTINTRMH_L_S = 0x729e8c00,
+    OPC_VFTINTRPL_L_S = 0x729e9000,
+    OPC_VFTINTRPH_L_S = 0x729e9400,
+    OPC_VFTINTRZL_L_S = 0x729e9800,
+    OPC_VFTINTRZH_L_S = 0x729e9c00,
+    OPC_VFTINTRNEL_L_S = 0x729ea000,
+    OPC_VFTINTRNEH_L_S = 0x729ea400,
+    OPC_VEXTH_H_B = 0x729ee000,
+    OPC_VEXTH_W_H = 0x729ee400,
+    OPC_VEXTH_D_W = 0x729ee800,
+    OPC_VEXTH_Q_D = 0x729eec00,
+    OPC_VEXTH_HU_BU = 0x729ef000,
+    OPC_VEXTH_WU_HU = 0x729ef400,
+    OPC_VEXTH_DU_WU = 0x729ef800,
+    OPC_VEXTH_QU_DU = 0x729efc00,
+    OPC_VREPLGR2VR_B = 0x729f0000,
+    OPC_VREPLGR2VR_H = 0x729f0400,
+    OPC_VREPLGR2VR_W = 0x729f0800,
+    OPC_VREPLGR2VR_D = 0x729f0c00,
+    OPC_VROTRI_B = 0x72a02000,
+    OPC_VROTRI_H = 0x72a04000,
+    OPC_VROTRI_W = 0x72a08000,
+    OPC_VROTRI_D = 0x72a10000,
+    OPC_VSRLRI_B = 0x72a42000,
+    OPC_VSRLRI_H = 0x72a44000,
+    OPC_VSRLRI_W = 0x72a48000,
+    OPC_VSRLRI_D = 0x72a50000,
+    OPC_VSRARI_B = 0x72a82000,
+    OPC_VSRARI_H = 0x72a84000,
+    OPC_VSRARI_W = 0x72a88000,
+    OPC_VSRARI_D = 0x72a90000,
+    OPC_VINSGR2VR_B = 0x72eb8000,
+    OPC_VINSGR2VR_H = 0x72ebc000,
+    OPC_VINSGR2VR_W = 0x72ebe000,
+    OPC_VINSGR2VR_D = 0x72ebf000,
+    OPC_VPICKVE2GR_B = 0x72ef8000,
+    OPC_VPICKVE2GR_H = 0x72efc000,
+    OPC_VPICKVE2GR_W = 0x72efe000,
+    OPC_VPICKVE2GR_D = 0x72eff000,
+    OPC_VPICKVE2GR_BU = 0x72f38000,
+    OPC_VPICKVE2GR_HU = 0x72f3c000,
+    OPC_VPICKVE2GR_WU = 0x72f3e000,
+    OPC_VPICKVE2GR_DU = 0x72f3f000,
+    OPC_VREPLVEI_B = 0x72f78000,
+    OPC_VREPLVEI_H = 0x72f7c000,
+    OPC_VREPLVEI_W = 0x72f7e000,
+    OPC_VREPLVEI_D = 0x72f7f000,
+    OPC_VSLLWIL_H_B = 0x73082000,
+    OPC_VSLLWIL_W_H = 0x73084000,
+    OPC_VSLLWIL_D_W = 0x73088000,
+    OPC_VEXTL_Q_D = 0x73090000,
+    OPC_VSLLWIL_HU_BU = 0x730c2000,
+    OPC_VSLLWIL_WU_HU = 0x730c4000,
+    OPC_VSLLWIL_DU_WU = 0x730c8000,
+    OPC_VEXTL_QU_DU = 0x730d0000,
+    OPC_VBITCLRI_B = 0x73102000,
+    OPC_VBITCLRI_H = 0x73104000,
+    OPC_VBITCLRI_W = 0x73108000,
+    OPC_VBITCLRI_D = 0x73110000,
+    OPC_VBITSETI_B = 0x73142000,
+    OPC_VBITSETI_H = 0x73144000,
+    OPC_VBITSETI_W = 0x73148000,
+    OPC_VBITSETI_D = 0x73150000,
+    OPC_VBITREVI_B = 0x73182000,
+    OPC_VBITREVI_H = 0x73184000,
+    OPC_VBITREVI_W = 0x73188000,
+    OPC_VBITREVI_D = 0x73190000,
+    OPC_VSAT_B = 0x73242000,
+    OPC_VSAT_H = 0x73244000,
+    OPC_VSAT_W = 0x73248000,
+    OPC_VSAT_D = 0x73250000,
+    OPC_VSAT_BU = 0x73282000,
+    OPC_VSAT_HU = 0x73284000,
+    OPC_VSAT_WU = 0x73288000,
+    OPC_VSAT_DU = 0x73290000,
+    OPC_VSLLI_B = 0x732c2000,
+    OPC_VSLLI_H = 0x732c4000,
+    OPC_VSLLI_W = 0x732c8000,
+    OPC_VSLLI_D = 0x732d0000,
+    OPC_VSRLI_B = 0x73302000,
+    OPC_VSRLI_H = 0x73304000,
+    OPC_VSRLI_W = 0x73308000,
+    OPC_VSRLI_D = 0x73310000,
+    OPC_VSRAI_B = 0x73342000,
+    OPC_VSRAI_H = 0x73344000,
+    OPC_VSRAI_W = 0x73348000,
+    OPC_VSRAI_D = 0x73350000,
+    OPC_VSRLNI_B_H = 0x73404000,
+    OPC_VSRLNI_H_W = 0x73408000,
+    OPC_VSRLNI_W_D = 0x73410000,
+    OPC_VSRLNI_D_Q = 0x73420000,
+    OPC_VSRLRNI_B_H = 0x73444000,
+    OPC_VSRLRNI_H_W = 0x73448000,
+    OPC_VSRLRNI_W_D = 0x73450000,
+    OPC_VSRLRNI_D_Q = 0x73460000,
+    OPC_VSSRLNI_B_H = 0x73484000,
+    OPC_VSSRLNI_H_W = 0x73488000,
+    OPC_VSSRLNI_W_D = 0x73490000,
+    OPC_VSSRLNI_D_Q = 0x734a0000,
+    OPC_VSSRLNI_BU_H = 0x734c4000,
+    OPC_VSSRLNI_HU_W = 0x734c8000,
+    OPC_VSSRLNI_WU_D = 0x734d0000,
+    OPC_VSSRLNI_DU_Q = 0x734e0000,
+    OPC_VSSRLRNI_B_H = 0x73504000,
+    OPC_VSSRLRNI_H_W = 0x73508000,
+    OPC_VSSRLRNI_W_D = 0x73510000,
+    OPC_VSSRLRNI_D_Q = 0x73520000,
+    OPC_VSSRLRNI_BU_H = 0x73544000,
+    OPC_VSSRLRNI_HU_W = 0x73548000,
+    OPC_VSSRLRNI_WU_D = 0x73550000,
+    OPC_VSSRLRNI_DU_Q = 0x73560000,
+    OPC_VSRANI_B_H = 0x73584000,
+    OPC_VSRANI_H_W = 0x73588000,
+    OPC_VSRANI_W_D = 0x73590000,
+    OPC_VSRANI_D_Q = 0x735a0000,
+    OPC_VSRARNI_B_H = 0x735c4000,
+    OPC_VSRARNI_H_W = 0x735c8000,
+    OPC_VSRARNI_W_D = 0x735d0000,
+    OPC_VSRARNI_D_Q = 0x735e0000,
+    OPC_VSSRANI_B_H = 0x73604000,
+    OPC_VSSRANI_H_W = 0x73608000,
+    OPC_VSSRANI_W_D = 0x73610000,
+    OPC_VSSRANI_D_Q = 0x73620000,
+    OPC_VSSRANI_BU_H = 0x73644000,
740
+ OPC_VSSRANI_HU_W = 0x73648000,
741
+ OPC_VSSRANI_WU_D = 0x73650000,
742
+ OPC_VSSRANI_DU_Q = 0x73660000,
743
+ OPC_VSSRARNI_B_H = 0x73684000,
744
+ OPC_VSSRARNI_H_W = 0x73688000,
745
+ OPC_VSSRARNI_W_D = 0x73690000,
746
+ OPC_VSSRARNI_D_Q = 0x736a0000,
747
+ OPC_VSSRARNI_BU_H = 0x736c4000,
748
+ OPC_VSSRARNI_HU_W = 0x736c8000,
749
+ OPC_VSSRARNI_WU_D = 0x736d0000,
750
+ OPC_VSSRARNI_DU_Q = 0x736e0000,
751
+ OPC_VEXTRINS_D = 0x73800000,
752
+ OPC_VEXTRINS_W = 0x73840000,
753
+ OPC_VEXTRINS_H = 0x73880000,
754
+ OPC_VEXTRINS_B = 0x738c0000,
755
+ OPC_VSHUF4I_B = 0x73900000,
756
+ OPC_VSHUF4I_H = 0x73940000,
757
+ OPC_VSHUF4I_W = 0x73980000,
758
+ OPC_VSHUF4I_D = 0x739c0000,
759
+ OPC_VBITSELI_B = 0x73c40000,
760
+ OPC_VANDI_B = 0x73d00000,
761
+ OPC_VORI_B = 0x73d40000,
762
+ OPC_VXORI_B = 0x73d80000,
763
+ OPC_VNORI_B = 0x73dc0000,
764
+ OPC_VLDI = 0x73e00000,
765
+ OPC_VPERMI_W = 0x73e40000,
766
} LoongArchInsn;
767
768
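For reference, a minimal standalone sketch (not part of the patch) of how one of
these opcode constants combines with register fields. Using OPC_VNEG_B from the
enum above, and the 0x20..0x3f vector-register numbering that the encode helpers
below assert, `vneg.b v0, v1` can be assembled by hand; the helper name here is
invented for the illustration:

    #include <assert.h>
    #include <stdint.h>

    #define OPC_VNEG_B 0x729c3000u

    /* vd occupies bits [4:0] and vj bits [9:5], as in encode_dj_slots(). */
    static uint32_t encode_vd_vj(uint32_t opc, uint32_t vd, uint32_t vj)
    {
        return opc | (vd & 0x1f) | (vj & 0x1f) << 5;
    }

    int main(void)
    {
        /* vneg.b v0, v1 -> 0x729c3020 */
        assert(encode_vd_vj(OPC_VNEG_B, 0x20 + 0, 0x20 + 1) == 0x729c3020);
        return 0;
    }
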
static int32_t __attribute__((unused))
@@ -XXX,XX +XXX,XX @@ encode_djk_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k)
    return opc | d | j << 5 | k << 10;
}

+static int32_t __attribute__((unused))
+encode_djka_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
+                  uint32_t a)
+{
+    return opc | d | j << 5 | k << 10 | a << 15;
+}
+
static int32_t __attribute__((unused))
encode_djkm_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
                  uint32_t m)
@@ -XXX,XX +XXX,XX @@ encode_djkm_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
    return opc | d | j << 5 | k << 10 | m << 16;
}

+static int32_t __attribute__((unused))
+encode_djkn_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
+                  uint32_t n)
+{
+    return opc | d | j << 5 | k << 10 | n << 18;
+}
+
static int32_t __attribute__((unused))
encode_dk_slots(LoongArchInsn opc, uint32_t d, uint32_t k)
{
    return opc | d | k << 10;
}

+static int32_t __attribute__((unused))
+encode_cdvj_insn(LoongArchInsn opc, TCGReg cd, TCGReg vj)
+{
+    tcg_debug_assert(cd >= 0 && cd <= 0x7);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    return encode_dj_slots(opc, cd, vj & 0x1f);
+}
+
static int32_t __attribute__((unused))
encode_dj_insn(LoongArchInsn opc, TCGReg d, TCGReg j)
{
@@ -XXX,XX +XXX,XX @@ encode_dsj20_insn(LoongArchInsn opc, TCGReg d, int32_t sj20)
    return encode_dj_slots(opc, d, sj20 & 0xfffff);
}

+static int32_t __attribute__((unused))
+encode_dvjuk1_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk1)
+{
+    tcg_debug_assert(d >= 0 && d <= 0x1f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk1 <= 0x1);
+    return encode_djk_slots(opc, d, vj & 0x1f, uk1);
+}
+
+static int32_t __attribute__((unused))
+encode_dvjuk2_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk2)
+{
+    tcg_debug_assert(d >= 0 && d <= 0x1f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk2 <= 0x3);
+    return encode_djk_slots(opc, d, vj & 0x1f, uk2);
+}
+
+static int32_t __attribute__((unused))
+encode_dvjuk3_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk3)
+{
+    tcg_debug_assert(d >= 0 && d <= 0x1f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk3 <= 0x7);
+    return encode_djk_slots(opc, d, vj & 0x1f, uk3);
+}
+
+static int32_t __attribute__((unused))
+encode_dvjuk4_insn(LoongArchInsn opc, TCGReg d, TCGReg vj, uint32_t uk4)
+{
+    tcg_debug_assert(d >= 0 && d <= 0x1f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk4 <= 0xf);
+    return encode_djk_slots(opc, d, vj & 0x1f, uk4);
+}
+
static int32_t __attribute__((unused))
encode_sd10k16_insn(LoongArchInsn opc, int32_t sd10k16)
{
@@ -XXX,XX +XXX,XX @@ encode_ud15_insn(LoongArchInsn opc, uint32_t ud15)
    return encode_d_slot(opc, ud15);
}

+static int32_t __attribute__((unused))
+encode_vdj_insn(LoongArchInsn opc, TCGReg vd, TCGReg j)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    return encode_dj_slots(opc, vd & 0x1f, j);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjk_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, TCGReg k)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(k >= 0 && k <= 0x1f);
+    return encode_djk_slots(opc, vd & 0x1f, j, k);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk10_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk10)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk10 >= -0x200 && sk10 <= 0x1ff);
+    return encode_djk_slots(opc, vd & 0x1f, j, sk10 & 0x3ff);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk11_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk11)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk11 >= -0x400 && sk11 <= 0x3ff);
+    return encode_djk_slots(opc, vd & 0x1f, j, sk11 & 0x7ff);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk12_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk12)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk12 >= -0x800 && sk12 <= 0x7ff);
+    return encode_djk_slots(opc, vd & 0x1f, j, sk12 & 0xfff);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk8un1_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
+                      uint32_t un1)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
+    tcg_debug_assert(un1 <= 0x1);
+    return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un1);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk8un2_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
+                      uint32_t un2)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
+    tcg_debug_assert(un2 <= 0x3);
+    return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un2);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk8un3_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
+                      uint32_t un3)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
+    tcg_debug_assert(un3 <= 0x7);
+    return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un3);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk8un4_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk8,
+                      uint32_t un4)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk8 >= -0x80 && sk8 <= 0x7f);
+    tcg_debug_assert(un4 <= 0xf);
+    return encode_djkn_slots(opc, vd & 0x1f, j, sk8 & 0xff, un4);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjsk9_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, int32_t sk9)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(sk9 >= -0x100 && sk9 <= 0xff);
+    return encode_djk_slots(opc, vd & 0x1f, j, sk9 & 0x1ff);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjuk1_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk1)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(uk1 <= 0x1);
+    return encode_djk_slots(opc, vd & 0x1f, j, uk1);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjuk2_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk2)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(uk2 <= 0x3);
+    return encode_djk_slots(opc, vd & 0x1f, j, uk2);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjuk3_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk3)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(uk3 <= 0x7);
+    return encode_djk_slots(opc, vd & 0x1f, j, uk3);
+}
+
+static int32_t __attribute__((unused))
+encode_vdjuk4_insn(LoongArchInsn opc, TCGReg vd, TCGReg j, uint32_t uk4)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(j >= 0 && j <= 0x1f);
+    tcg_debug_assert(uk4 <= 0xf);
+    return encode_djk_slots(opc, vd & 0x1f, j, uk4);
+}
+
+static int32_t __attribute__((unused))
+encode_vdsj13_insn(LoongArchInsn opc, TCGReg vd, int32_t sj13)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(sj13 >= -0x1000 && sj13 <= 0xfff);
+    return encode_dj_slots(opc, vd & 0x1f, sj13 & 0x1fff);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvj_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    return encode_dj_slots(opc, vd & 0x1f, vj & 0x1f);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjk_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, TCGReg k)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(k >= 0 && k <= 0x1f);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, k);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjsk5_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, int32_t sk5)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(sk5 >= -0x10 && sk5 <= 0xf);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, sk5 & 0x1f);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk1_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk1)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk1 <= 0x1);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk1);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk2_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk2)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk2 <= 0x3);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk2);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk3_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk3 <= 0x7);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk3);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk4_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk4 <= 0xf);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk4);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk5_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk5 <= 0x1f);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk5);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk6_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk6 <= 0x3f);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk6);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk7_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk7 <= 0x7f);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk7);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjuk8_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(uk8 <= 0xff);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, uk8);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjvk_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(vk >= 0x20 && vk <= 0x3f);
+    return encode_djk_slots(opc, vd & 0x1f, vj & 0x1f, vk & 0x1f);
+}
+
+static int32_t __attribute__((unused))
+encode_vdvjvkva_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj, TCGReg vk,
+                     TCGReg va)
+{
+    tcg_debug_assert(vd >= 0x20 && vd <= 0x3f);
+    tcg_debug_assert(vj >= 0x20 && vj <= 0x3f);
+    tcg_debug_assert(vk >= 0x20 && vk <= 0x3f);
+    tcg_debug_assert(va >= 0x20 && va <= 0x3f);
+    return encode_djka_slots(opc, vd & 0x1f, vj & 0x1f, vk & 0x1f, va & 0x1f);
+}
+
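A side note on the naming: each `encode_*_insn` helper spells out its operand
layout, e.g. "vdvjuk5" is a vector destination, a vector source, and a 5-bit
unsigned immediate, while "sk<N>" fields are N-bit signed immediates. A minimal
sketch of the range checks the tcg_debug_assert() calls above implement (the
helpers below are illustrative only, not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* uk<bits>: 0 .. 2^bits - 1 */
    static bool fits_unsigned(uint32_t val, unsigned bits)
    {
        return val <= (1u << bits) - 1;
    }

    /* sk<bits>: -2^(bits-1) .. 2^(bits-1) - 1 */
    static bool fits_signed(int32_t val, unsigned bits)
    {
        int32_t max = (1 << (bits - 1)) - 1;
        return val >= -max - 1 && val <= max;
    }
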
/* Emits the `clz.w d, j` instruction. */
static void __attribute__((unused))
tcg_out_opc_clz_w(TCGContext *s, TCGReg d, TCGReg j)
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_xori(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12)
    tcg_out32(s, encode_djuk12_insn(OPC_XORI, d, j, uk12));
}

+/* Emits the `vfmadd.s vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmadd_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+    tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMADD_S, vd, vj, vk, va));
+}
+
+/* Emits the `vfmadd.d vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+    tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMADD_D, vd, vj, vk, va));
+}
+
+/* Emits the `vfmsub.s vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmsub_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+    tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMSUB_S, vd, vj, vk, va));
+}
+
+/* Emits the `vfmsub.d vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfmsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+    tcg_out32(s, encode_vdvjvkva_insn(OPC_VFMSUB_D, vd, vj, vk, va));
+}
+
+/* Emits the `vfnmadd.s vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfnmadd_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+    tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMADD_S, vd, vj, vk, va));
+}
+
+/* Emits the `vfnmadd.d vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfnmadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+    tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMADD_D, vd, vj, vk, va));
+}
+
+/* Emits the `vfnmsub.s vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfnmsub_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+    tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMSUB_S, vd, vj, vk, va));
+}
+
+/* Emits the `vfnmsub.d vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfnmsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+    tcg_out32(s, encode_vdvjvkva_insn(OPC_VFNMSUB_D, vd, vj, vk, va));
+}
+
+/* Emits the `vfcmp.caf.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_caf_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CAF_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.saf.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_saf_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SAF_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.clt.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_clt_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLT_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.slt.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_slt_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLT_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.ceq.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_ceq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CEQ_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.seq.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_seq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SEQ_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cle.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cle_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sle.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sle_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cun.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cun_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUN_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sun.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sun_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUN_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cult.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cult_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULT_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sult.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sult_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULT_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cueq.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cueq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUEQ_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sueq.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sueq_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUEQ_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cule.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cule_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sule.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sule_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cne.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cne_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CNE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sne.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sne_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SNE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cor.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cor_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_COR_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sor.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sor_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SOR_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cune.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cune_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUNE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sune.s vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sune_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUNE_S, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.caf.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_caf_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CAF_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.saf.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_saf_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SAF_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.clt.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_clt_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLT_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.slt.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_slt_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLT_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.ceq.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_ceq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CEQ_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.seq.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_seq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SEQ_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cle.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cle_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CLE_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sle.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sle_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SLE_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cun.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cun_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUN_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sun.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sun_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUN_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cult.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cult_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULT_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sult.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sult_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULT_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cueq.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cueq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUEQ_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sueq.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sueq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUEQ_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cule.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cule_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CULE_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sule.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sule_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SULE_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cne.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cne_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CNE_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sne.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sne_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SNE_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cor.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cor_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_COR_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sor.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sor_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SOR_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.cune.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_cune_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_CUNE_D, vd, vj, vk));
+}
+
+/* Emits the `vfcmp.sune.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vfcmp_sune_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VFCMP_SUNE_D, vd, vj, vk));
+}
+
+/* Emits the `vbitsel.v vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitsel_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+    tcg_out32(s, encode_vdvjvkva_insn(OPC_VBITSEL_V, vd, vj, vk, va));
+}
+
+/* Emits the `vshuf.b vd, vj, vk, va` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk, TCGReg va)
+{
+    tcg_out32(s, encode_vdvjvkva_insn(OPC_VSHUF_B, vd, vj, vk, va));
+}
+
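A scalar model of what the vbitsel.v helper above emits, assuming the usual LSX
bit-select semantics (a sketch for orientation, not from the patch): each result
bit is taken from vk where the mask va has a 1 bit, and from vj elsewhere; the
real instruction applies this across all 128 bits of the vector.

    #include <stdint.h>

    /* Per-64-bit-lane model of vbitsel.v vd, vj, vk, va. */
    static uint64_t bitsel(uint64_t vj, uint64_t vk, uint64_t va)
    {
        return (vk & va) | (vj & ~va);
    }
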
/* Emits the `addu16i.d d, j, sk16` instruction. */
static void __attribute__((unused))
tcg_out_opc_addu16i_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_ld_wu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
    tcg_out32(s, encode_djsk12_insn(OPC_LD_WU, d, j, sk12));
}

+/* Emits the `vld vd, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vld(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk12)
+{
+    tcg_out32(s, encode_vdjsk12_insn(OPC_VLD, vd, j, sk12));
+}
+
+/* Emits the `vst vd, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vst(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk12)
+{
+    tcg_out32(s, encode_vdjsk12_insn(OPC_VST, vd, j, sk12));
+}
+
+/* Emits the `vldrepl.d vd, j, sk9` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vldrepl_d(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk9)
+{
+    tcg_out32(s, encode_vdjsk9_insn(OPC_VLDREPL_D, vd, j, sk9));
+}
+
+/* Emits the `vldrepl.w vd, j, sk10` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vldrepl_w(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk10)
+{
+    tcg_out32(s, encode_vdjsk10_insn(OPC_VLDREPL_W, vd, j, sk10));
+}
+
+/* Emits the `vldrepl.h vd, j, sk11` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vldrepl_h(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk11)
+{
+    tcg_out32(s, encode_vdjsk11_insn(OPC_VLDREPL_H, vd, j, sk11));
+}
+
+/* Emits the `vldrepl.b vd, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vldrepl_b(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk12)
+{
+    tcg_out32(s, encode_vdjsk12_insn(OPC_VLDREPL_B, vd, j, sk12));
+}
+
+/* Emits the `vstelm.d vd, j, sk8, un1` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vstelm_d(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
+                     uint32_t un1)
+{
+    tcg_out32(s, encode_vdjsk8un1_insn(OPC_VSTELM_D, vd, j, sk8, un1));
+}
+
+/* Emits the `vstelm.w vd, j, sk8, un2` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vstelm_w(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
+                     uint32_t un2)
+{
+    tcg_out32(s, encode_vdjsk8un2_insn(OPC_VSTELM_W, vd, j, sk8, un2));
+}
+
+/* Emits the `vstelm.h vd, j, sk8, un3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vstelm_h(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
+                     uint32_t un3)
+{
+    tcg_out32(s, encode_vdjsk8un3_insn(OPC_VSTELM_H, vd, j, sk8, un3));
+}
+
+/* Emits the `vstelm.b vd, j, sk8, un4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vstelm_b(TCGContext *s, TCGReg vd, TCGReg j, int32_t sk8,
+                     uint32_t un4)
+{
+    tcg_out32(s, encode_vdjsk8un4_insn(OPC_VSTELM_B, vd, j, sk8, un4));
+}
+
/* Emits the `ldx.b d, j, k` instruction. */
static void __attribute__((unused))
tcg_out_opc_ldx_b(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_ldx_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
    tcg_out32(s, encode_djk_insn(OPC_LDX_WU, d, j, k));
}

+/* Emits the `vldx vd, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vldx(TCGContext *s, TCGReg vd, TCGReg j, TCGReg k)
+{
+    tcg_out32(s, encode_vdjk_insn(OPC_VLDX, vd, j, k));
+}
+
+/* Emits the `vstx vd, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vstx(TCGContext *s, TCGReg vd, TCGReg j, TCGReg k)
+{
+    tcg_out32(s, encode_vdjk_insn(OPC_VSTX, vd, j, k));
+}
+
}

-uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
+uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
@@ -XXX,XX +XXX,XX @@ uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
}

/* warning: addr must be aligned */
-static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
+static inline uint16_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
@@ -XXX,XX +XXX,XX @@ static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
    return val;
}

-uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
+uint16_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_NATIVE_ENDIAN);
}

-uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
+uint16_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_LITTLE_ENDIAN);
}

-uint32_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
+uint16_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
@@ -XXX,XX +XXX,XX @@ void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
}

void glue(address_space_stb, SUFFIX)(ARG1_DECL,
-    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+    hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
@@ -XXX,XX +XXX,XX @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL,

/* warning: addr must be aligned */
static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
-    hwaddr addr, uint32_t val, MemTxAttrs attrs,
+    hwaddr addr, uint16_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
@@ -XXX,XX +XXX,XX @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
}

void glue(address_space_stw, SUFFIX)(ARG1_DECL,
-    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+    hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
-    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+    hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
-    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+    hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
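The address_space_ld*/st* hunks above narrow the accessor value types from
uint32_t to the exact access width. A minimal sketch of why that matters
(illustrative only, not from the patch): with a width-exact signature, any
truncation is visible in the prototype instead of happening silently inside
the helper.

    #include <stdint.h>

    /* Little-endian 16-bit load from a byte buffer; returning uint16_t
       documents in the type that exactly two bytes are produced. */
    static inline uint16_t lduw_le_p_example(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }
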
/* Emits the `dbar ud15` instruction. */
1605
static void __attribute__((unused))
1606
tcg_out_opc_dbar(TCGContext *s, uint32_t ud15)
1607
@@ -XXX,XX +XXX,XX @@ tcg_out_opc_bleu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
1608
tcg_out32(s, encode_djsk16_insn(OPC_BLEU, d, j, sk16));
138
}
1609
}
139
1610
140
void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
1611
+/* Emits the `vseq.b vd, vj, vk` instruction. */
141
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
1612
+static void __attribute__((unused))
142
+ hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result)
1613
+tcg_out_opc_vseq_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
143
{
1614
+{
144
glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
1615
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_B, vd, vj, vk));
145
DEVICE_LITTLE_ENDIAN);
1616
+}
146
}
1617
+
147
1618
+/* Emits the `vseq.h vd, vj, vk` instruction. */
148
void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
1619
+static void __attribute__((unused))
149
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
1620
+tcg_out_opc_vseq_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
150
+ hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result)
1621
+{
151
{
1622
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_H, vd, vj, vk));
152
glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
1623
+}
153
DEVICE_BIG_ENDIAN);
1624
+
1625
+/* Emits the `vseq.w vd, vj, vk` instruction. */
1626
+static void __attribute__((unused))
1627
+tcg_out_opc_vseq_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1628
+{
1629
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_W, vd, vj, vk));
1630
+}
1631
+
1632
+/* Emits the `vseq.d vd, vj, vk` instruction. */
1633
+static void __attribute__((unused))
1634
+tcg_out_opc_vseq_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1635
+{
1636
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSEQ_D, vd, vj, vk));
1637
+}
1638
+
1639
+/* Emits the `vsle.b vd, vj, vk` instruction. */
1640
+static void __attribute__((unused))
1641
+tcg_out_opc_vsle_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1642
+{
1643
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_B, vd, vj, vk));
1644
+}
1645
+
1646
+/* Emits the `vsle.h vd, vj, vk` instruction. */
1647
+static void __attribute__((unused))
1648
+tcg_out_opc_vsle_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1649
+{
1650
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_H, vd, vj, vk));
1651
+}
1652
+
1653
+/* Emits the `vsle.w vd, vj, vk` instruction. */
1654
+static void __attribute__((unused))
1655
+tcg_out_opc_vsle_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1656
+{
1657
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_W, vd, vj, vk));
1658
+}
1659
+
1660
+/* Emits the `vsle.d vd, vj, vk` instruction. */
1661
+static void __attribute__((unused))
1662
+tcg_out_opc_vsle_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1663
+{
1664
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_D, vd, vj, vk));
1665
+}
1666
+
1667
+/* Emits the `vsle.bu vd, vj, vk` instruction. */
1668
+static void __attribute__((unused))
1669
+tcg_out_opc_vsle_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1670
+{
1671
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_BU, vd, vj, vk));
1672
+}
1673
+
1674
+/* Emits the `vsle.hu vd, vj, vk` instruction. */
1675
+static void __attribute__((unused))
1676
+tcg_out_opc_vsle_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1677
+{
1678
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_HU, vd, vj, vk));
1679
+}
1680
+
1681
+/* Emits the `vsle.wu vd, vj, vk` instruction. */
1682
+static void __attribute__((unused))
1683
+tcg_out_opc_vsle_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1684
+{
1685
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_WU, vd, vj, vk));
1686
+}
1687
+
1688
+/* Emits the `vsle.du vd, vj, vk` instruction. */
1689
+static void __attribute__((unused))
1690
+tcg_out_opc_vsle_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1691
+{
1692
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLE_DU, vd, vj, vk));
1693
+}
1694
+
1695
+/* Emits the `vslt.b vd, vj, vk` instruction. */
1696
+static void __attribute__((unused))
1697
+tcg_out_opc_vslt_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1698
+{
1699
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_B, vd, vj, vk));
1700
+}
1701
+
1702
+/* Emits the `vslt.h vd, vj, vk` instruction. */
1703
+static void __attribute__((unused))
1704
+tcg_out_opc_vslt_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1705
+{
1706
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_H, vd, vj, vk));
1707
+}
1708
+
1709
+/* Emits the `vslt.w vd, vj, vk` instruction. */
1710
+static void __attribute__((unused))
1711
+tcg_out_opc_vslt_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1712
+{
1713
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_W, vd, vj, vk));
1714
+}
1715
+
1716
+/* Emits the `vslt.d vd, vj, vk` instruction. */
1717
+static void __attribute__((unused))
1718
+tcg_out_opc_vslt_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1719
+{
1720
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_D, vd, vj, vk));
1721
+}
1722
+
1723
+/* Emits the `vslt.bu vd, vj, vk` instruction. */
1724
+static void __attribute__((unused))
1725
+tcg_out_opc_vslt_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1726
+{
1727
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_BU, vd, vj, vk));
1728
+}
1729
+
1730
+/* Emits the `vslt.hu vd, vj, vk` instruction. */
1731
+static void __attribute__((unused))
1732
+tcg_out_opc_vslt_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1733
+{
1734
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_HU, vd, vj, vk));
1735
+}
1736
+
1737
+/* Emits the `vslt.wu vd, vj, vk` instruction. */
1738
+static void __attribute__((unused))
1739
+tcg_out_opc_vslt_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1740
+{
1741
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_WU, vd, vj, vk));
1742
+}
1743
+
1744
+/* Emits the `vslt.du vd, vj, vk` instruction. */
1745
+static void __attribute__((unused))
1746
+tcg_out_opc_vslt_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1747
+{
1748
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSLT_DU, vd, vj, vk));
1749
+}
1750
+
1751
+/* Emits the `vadd.b vd, vj, vk` instruction. */
1752
+static void __attribute__((unused))
1753
+tcg_out_opc_vadd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1754
+{
1755
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_B, vd, vj, vk));
1756
+}
1757
+
1758
+/* Emits the `vadd.h vd, vj, vk` instruction. */
1759
+static void __attribute__((unused))
1760
+tcg_out_opc_vadd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1761
+{
1762
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_H, vd, vj, vk));
1763
+}
1764
+
1765
+/* Emits the `vadd.w vd, vj, vk` instruction. */
1766
+static void __attribute__((unused))
1767
+tcg_out_opc_vadd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1768
+{
1769
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_W, vd, vj, vk));
1770
+}
1771
+
1772
+/* Emits the `vadd.d vd, vj, vk` instruction. */
1773
+static void __attribute__((unused))
1774
+tcg_out_opc_vadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1775
+{
1776
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_D, vd, vj, vk));
1777
+}
1778
+
1779
+/* Emits the `vsub.b vd, vj, vk` instruction. */
1780
+static void __attribute__((unused))
1781
+tcg_out_opc_vsub_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1782
+{
1783
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_B, vd, vj, vk));
1784
+}
1785
+
1786
+/* Emits the `vsub.h vd, vj, vk` instruction. */
1787
+static void __attribute__((unused))
1788
+tcg_out_opc_vsub_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1789
+{
1790
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_H, vd, vj, vk));
1791
+}
1792
+
1793
+/* Emits the `vsub.w vd, vj, vk` instruction. */
1794
+static void __attribute__((unused))
1795
+tcg_out_opc_vsub_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1796
+{
1797
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_W, vd, vj, vk));
1798
+}
1799
+
1800
+/* Emits the `vsub.d vd, vj, vk` instruction. */
1801
+static void __attribute__((unused))
1802
+tcg_out_opc_vsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1803
+{
1804
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_D, vd, vj, vk));
1805
+}
1806
+
1807
+/* Emits the `vaddwev.h.b vd, vj, vk` instruction. */
1808
+static void __attribute__((unused))
1809
+tcg_out_opc_vaddwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1810
+{
1811
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_H_B, vd, vj, vk));
1812
+}
1813
+
1814
+/* Emits the `vaddwev.w.h vd, vj, vk` instruction. */
1815
+static void __attribute__((unused))
1816
+tcg_out_opc_vaddwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1817
+{
1818
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_W_H, vd, vj, vk));
1819
+}
1820
+
1821
+/* Emits the `vaddwev.d.w vd, vj, vk` instruction. */
1822
+static void __attribute__((unused))
1823
+tcg_out_opc_vaddwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1824
+{
1825
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_D_W, vd, vj, vk));
1826
+}
1827
+
1828
+/* Emits the `vaddwev.q.d vd, vj, vk` instruction. */
1829
+static void __attribute__((unused))
1830
+tcg_out_opc_vaddwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1831
+{
1832
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_Q_D, vd, vj, vk));
1833
+}
1834
+
1835
+/* Emits the `vsubwev.h.b vd, vj, vk` instruction. */
1836
+static void __attribute__((unused))
1837
+tcg_out_opc_vsubwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1838
+{
1839
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_H_B, vd, vj, vk));
1840
+}
1841
+
1842
+/* Emits the `vsubwev.w.h vd, vj, vk` instruction. */
1843
+static void __attribute__((unused))
1844
+tcg_out_opc_vsubwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1845
+{
1846
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_W_H, vd, vj, vk));
1847
+}
1848
+
1849
+/* Emits the `vsubwev.d.w vd, vj, vk` instruction. */
1850
+static void __attribute__((unused))
1851
+tcg_out_opc_vsubwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1852
+{
1853
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_D_W, vd, vj, vk));
1854
+}
1855
+
1856
+/* Emits the `vsubwev.q.d vd, vj, vk` instruction. */
1857
+static void __attribute__((unused))
1858
+tcg_out_opc_vsubwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1859
+{
1860
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_Q_D, vd, vj, vk));
1861
+}
1862
+
1863
+/* Emits the `vaddwod.h.b vd, vj, vk` instruction. */
1864
+static void __attribute__((unused))
1865
+tcg_out_opc_vaddwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1866
+{
1867
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_H_B, vd, vj, vk));
1868
+}
1869
+
1870
+/* Emits the `vaddwod.w.h vd, vj, vk` instruction. */
1871
+static void __attribute__((unused))
1872
+tcg_out_opc_vaddwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1873
+{
1874
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_W_H, vd, vj, vk));
1875
+}
1876
+
1877
+/* Emits the `vaddwod.d.w vd, vj, vk` instruction. */
1878
+static void __attribute__((unused))
1879
+tcg_out_opc_vaddwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1880
+{
1881
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_D_W, vd, vj, vk));
1882
+}
1883
+
1884
+/* Emits the `vaddwod.q.d vd, vj, vk` instruction. */
1885
+static void __attribute__((unused))
1886
+tcg_out_opc_vaddwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1887
+{
1888
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_Q_D, vd, vj, vk));
1889
+}
1890
+
1891
+/* Emits the `vsubwod.h.b vd, vj, vk` instruction. */
1892
+static void __attribute__((unused))
1893
+tcg_out_opc_vsubwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1894
+{
1895
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_H_B, vd, vj, vk));
1896
+}
1897
+
1898
+/* Emits the `vsubwod.w.h vd, vj, vk` instruction. */
1899
+static void __attribute__((unused))
1900
+tcg_out_opc_vsubwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1901
+{
1902
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_W_H, vd, vj, vk));
1903
+}
1904
+
1905
+/* Emits the `vsubwod.d.w vd, vj, vk` instruction. */
1906
+static void __attribute__((unused))
1907
+tcg_out_opc_vsubwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1908
+{
1909
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_D_W, vd, vj, vk));
1910
+}
1911
+
1912
+/* Emits the `vsubwod.q.d vd, vj, vk` instruction. */
1913
+static void __attribute__((unused))
1914
+tcg_out_opc_vsubwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1915
+{
1916
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_Q_D, vd, vj, vk));
1917
+}
1918
+
1919
+/* Emits the `vaddwev.h.bu vd, vj, vk` instruction. */
1920
+static void __attribute__((unused))
1921
+tcg_out_opc_vaddwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1922
+{
1923
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_H_BU, vd, vj, vk));
1924
+}
1925
+
1926
+/* Emits the `vaddwev.w.hu vd, vj, vk` instruction. */
1927
+static void __attribute__((unused))
1928
+tcg_out_opc_vaddwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1929
+{
1930
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_W_HU, vd, vj, vk));
1931
+}
1932
+
1933
+/* Emits the `vaddwev.d.wu vd, vj, vk` instruction. */
1934
+static void __attribute__((unused))
1935
+tcg_out_opc_vaddwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1936
+{
1937
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_D_WU, vd, vj, vk));
1938
+}
1939
+
1940
+/* Emits the `vaddwev.q.du vd, vj, vk` instruction. */
1941
+static void __attribute__((unused))
1942
+tcg_out_opc_vaddwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1943
+{
1944
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_Q_DU, vd, vj, vk));
1945
+}
1946
+
1947
+/* Emits the `vsubwev.h.bu vd, vj, vk` instruction. */
1948
+static void __attribute__((unused))
1949
+tcg_out_opc_vsubwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
1950
+{
1951
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_H_BU, vd, vj, vk));
1952
+}
1953
+
1954
+/* Emits the `vsubwev.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vsubwev.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vsubwev.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWEV_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vsubwod.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsubwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSUBWOD_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vaddwev.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwev_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWEV_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vaddwod.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vaddwod_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDWOD_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vsadd.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_B, vd, vj, vk));
+}
+
+/* Emits the `vsadd.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_H, vd, vj, vk));
+}
+
+/* Emits the `vsadd.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_W, vd, vj, vk));
+}
+
+/* Emits the `vsadd.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_D, vd, vj, vk));
+}
+
+/* Emits the `vssub.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_B, vd, vj, vk));
+}
+
+/* Emits the `vssub.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_H, vd, vj, vk));
+}
+
+/* Emits the `vssub.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_W, vd, vj, vk));
+}
+
+/* Emits the `vssub.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_D, vd, vj, vk));
+}
+
+/* Emits the `vsadd.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_BU, vd, vj, vk));
+}
+
+/* Emits the `vsadd.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_HU, vd, vj, vk));
+}
+
+/* Emits the `vsadd.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_WU, vd, vj, vk));
+}
+
+/* Emits the `vsadd.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsadd_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSADD_DU, vd, vj, vk));
+}
+
+/* Emits the `vssub.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_BU, vd, vj, vk));
+}
+
+/* Emits the `vssub.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_HU, vd, vj, vk));
+}
+
+/* Emits the `vssub.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_WU, vd, vj, vk));
+}
+
+/* Emits the `vssub.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssub_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSUB_DU, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_H_B, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_W_H, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_D_W, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_H_B, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_W_H, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_D_W, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.hu.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_HU_BU, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.wu.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_WU_HU, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.du.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_du_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_DU_WU, vd, vj, vk));
+}
+
+/* Emits the `vhaddw.qu.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhaddw_qu_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHADDW_QU_DU, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.hu.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_HU_BU, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.wu.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_WU_HU, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.du.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_du_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_DU_WU, vd, vj, vk));
+}
+
+/* Emits the `vhsubw.qu.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vhsubw_qu_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VHSUBW_QU_DU, vd, vj, vk));
+}
+
+/* Emits the `vadda.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadda_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_B, vd, vj, vk));
+}
+
+/* Emits the `vadda.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadda_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_H, vd, vj, vk));
+}
+
+/* Emits the `vadda.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadda_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_W, vd, vj, vk));
+}
+
+/* Emits the `vadda.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vadda_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VADDA_D, vd, vj, vk));
+}
+
+/* Emits the `vabsd.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_B, vd, vj, vk));
+}
+
+/* Emits the `vabsd.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_H, vd, vj, vk));
+}
+
+/* Emits the `vabsd.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_W, vd, vj, vk));
+}
+
+/* Emits the `vabsd.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_D, vd, vj, vk));
+}
+
+/* Emits the `vabsd.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_BU, vd, vj, vk));
+}
+
+/* Emits the `vabsd.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_HU, vd, vj, vk));
+}
+
+/* Emits the `vabsd.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_WU, vd, vj, vk));
+}
+
+/* Emits the `vabsd.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vabsd_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VABSD_DU, vd, vj, vk));
+}
+
+/* Emits the `vavg.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_B, vd, vj, vk));
+}
+
+/* Emits the `vavg.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_H, vd, vj, vk));
+}
+
+/* Emits the `vavg.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_W, vd, vj, vk));
+}
+
+/* Emits the `vavg.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_D, vd, vj, vk));
+}
+
+/* Emits the `vavg.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_BU, vd, vj, vk));
+}
+
+/* Emits the `vavg.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_HU, vd, vj, vk));
+}
+
+/* Emits the `vavg.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_WU, vd, vj, vk));
+}
+
+/* Emits the `vavg.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavg_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVG_DU, vd, vj, vk));
+}
+
+/* Emits the `vavgr.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_B, vd, vj, vk));
+}
+
+/* Emits the `vavgr.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_H, vd, vj, vk));
+}
+
+/* Emits the `vavgr.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_W, vd, vj, vk));
+}
+
+/* Emits the `vavgr.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_D, vd, vj, vk));
+}
+
+/* Emits the `vavgr.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_BU, vd, vj, vk));
+}
+
+/* Emits the `vavgr.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_HU, vd, vj, vk));
+}
+
+/* Emits the `vavgr.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_WU, vd, vj, vk));
+}
+
+/* Emits the `vavgr.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vavgr_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VAVGR_DU, vd, vj, vk));
+}
+
+/* Emits the `vmax.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_B, vd, vj, vk));
+}
+
+/* Emits the `vmax.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_H, vd, vj, vk));
+}
+
+/* Emits the `vmax.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_W, vd, vj, vk));
+}
+
+/* Emits the `vmax.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_D, vd, vj, vk));
+}
+
+/* Emits the `vmin.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_B, vd, vj, vk));
+}
+
+/* Emits the `vmin.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_H, vd, vj, vk));
+}
+
+/* Emits the `vmin.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_W, vd, vj, vk));
+}
+
+/* Emits the `vmin.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_D, vd, vj, vk));
+}
+
+/* Emits the `vmax.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_BU, vd, vj, vk));
+}
+
+/* Emits the `vmax.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_HU, vd, vj, vk));
+}
+
+/* Emits the `vmax.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_WU, vd, vj, vk));
+}
+
+/* Emits the `vmax.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmax_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMAX_DU, vd, vj, vk));
+}
+
+/* Emits the `vmin.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_BU, vd, vj, vk));
+}
+
+/* Emits the `vmin.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_HU, vd, vj, vk));
+}
+
+/* Emits the `vmin.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_WU, vd, vj, vk));
+}
+
+/* Emits the `vmin.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmin_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMIN_DU, vd, vj, vk));
+}
+
+/* Emits the `vmul.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmul_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_B, vd, vj, vk));
+}
+
+/* Emits the `vmul.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmul_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_H, vd, vj, vk));
+}
+
+/* Emits the `vmul.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmul_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_W, vd, vj, vk));
+}
+
+/* Emits the `vmul.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmul_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUL_D, vd, vj, vk));
+}
+
+/* Emits the `vmuh.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_B, vd, vj, vk));
+}
+
+/* Emits the `vmuh.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_H, vd, vj, vk));
+}
+
+/* Emits the `vmuh.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_W, vd, vj, vk));
+}
+
+/* Emits the `vmuh.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_D, vd, vj, vk));
+}
+
+/* Emits the `vmuh.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_BU, vd, vj, vk));
+}
+
+/* Emits the `vmuh.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_HU, vd, vj, vk));
+}
+
+/* Emits the `vmuh.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_WU, vd, vj, vk));
+}
+
+/* Emits the `vmuh.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmuh_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMUH_DU, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_H_B, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_W_H, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_D_W, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_H_B, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_W_H, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_D_W, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vmulwev.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwev_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWEV_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vmulwod.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmulwod_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMULWOD_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vmadd.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmadd_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_B, vd, vj, vk));
+}
+
+/* Emits the `vmadd.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmadd_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_H, vd, vj, vk));
+}
+
+/* Emits the `vmadd.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmadd_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_W, vd, vj, vk));
+}
+
+/* Emits the `vmadd.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADD_D, vd, vj, vk));
+}
+
+/* Emits the `vmsub.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmsub_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_B, vd, vj, vk));
+}
+
+/* Emits the `vmsub.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmsub_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_H, vd, vj, vk));
+}
+
+/* Emits the `vmsub.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmsub_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_W, vd, vj, vk));
+}
+
+/* Emits the `vmsub.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMSUB_D, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_H_B, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_W_H, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_D_W, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.h.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_h_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_H_B, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.w.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_w_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_W_H, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.d.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_d_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_D_W, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.q.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_q_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_Q_D, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.h.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_h_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_H_BU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.w.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_w_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_W_HU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.d.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_d_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_D_WU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.q.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_q_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_Q_DU, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vmaddwev.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwev_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWEV_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.h.bu.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_h_bu_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_H_BU_B, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.w.hu.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_w_hu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_W_HU_H, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.d.wu.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_d_wu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_D_WU_W, vd, vj, vk));
+}
+
+/* Emits the `vmaddwod.q.du.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmaddwod_q_du_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMADDWOD_Q_DU_D, vd, vj, vk));
+}
+
+/* Emits the `vdiv.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_B, vd, vj, vk));
+}
+
+/* Emits the `vdiv.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_H, vd, vj, vk));
+}
+
+/* Emits the `vdiv.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_W, vd, vj, vk));
+}
+
+/* Emits the `vdiv.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_D, vd, vj, vk));
+}
+
+/* Emits the `vmod.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_B, vd, vj, vk));
+}
+
+/* Emits the `vmod.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_H, vd, vj, vk));
+}
+
+/* Emits the `vmod.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_W, vd, vj, vk));
+}
+
+/* Emits the `vmod.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_D, vd, vj, vk));
+}
+
+/* Emits the `vdiv.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_BU, vd, vj, vk));
+}
+
+/* Emits the `vdiv.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_HU, vd, vj, vk));
+}
+
+/* Emits the `vdiv.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_WU, vd, vj, vk));
+}
+
+/* Emits the `vdiv.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vdiv_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VDIV_DU, vd, vj, vk));
+}
+
+/* Emits the `vmod.bu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_bu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_BU, vd, vj, vk));
+}
+
+/* Emits the `vmod.hu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_hu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_HU, vd, vj, vk));
+}
+
+/* Emits the `vmod.wu vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_wu(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_WU, vd, vj, vk));
+}
+
+/* Emits the `vmod.du vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vmod_du(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VMOD_DU, vd, vj, vk));
+}
+
+/* Emits the `vsll.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsll_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_B, vd, vj, vk));
+}
+
+/* Emits the `vsll.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsll_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_H, vd, vj, vk));
+}
+
+/* Emits the `vsll.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsll_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_W, vd, vj, vk));
+}
+
+/* Emits the `vsll.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsll_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSLL_D, vd, vj, vk));
+}
+
+/* Emits the `vsrl.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrl_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_B, vd, vj, vk));
+}
+
+/* Emits the `vsrl.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrl_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_H, vd, vj, vk));
+}
+
+/* Emits the `vsrl.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrl_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_W, vd, vj, vk));
+}
+
+/* Emits the `vsrl.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrl_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRL_D, vd, vj, vk));
+}
+
+/* Emits the `vsra.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsra_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_B, vd, vj, vk));
+}
+
+/* Emits the `vsra.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsra_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_H, vd, vj, vk));
+}
+
+/* Emits the `vsra.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsra_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_W, vd, vj, vk));
+}
+
+/* Emits the `vsra.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsra_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRA_D, vd, vj, vk));
+}
+
+/* Emits the `vrotr.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_B, vd, vj, vk));
+}
+
+/* Emits the `vrotr.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_H, vd, vj, vk));
+}
+
+/* Emits the `vrotr.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_W, vd, vj, vk));
+}
+
+/* Emits the `vrotr.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VROTR_D, vd, vj, vk));
+}
+
+/* Emits the `vsrlr.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_B, vd, vj, vk));
+}
+
+/* Emits the `vsrlr.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_H, vd, vj, vk));
+}
+
+/* Emits the `vsrlr.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_W, vd, vj, vk));
+}
+
+/* Emits the `vsrlr.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLR_D, vd, vj, vk));
+}
+
+/* Emits the `vsrar.b vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrar_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_B, vd, vj, vk));
+}
+
+/* Emits the `vsrar.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrar_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_H, vd, vj, vk));
+}
+
+/* Emits the `vsrar.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrar_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_W, vd, vj, vk));
+}
+
+/* Emits the `vsrar.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrar_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAR_D, vd, vj, vk));
+}
+
+/* Emits the `vsrln.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrln_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vsrln.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrln_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vsrln.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrln_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vsran.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsran_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vsran.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsran_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vsran.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsran_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRAN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vsrlrn.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLRN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vsrlrn.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLRN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vsrlrn.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRLRN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vsrarn.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRARN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vsrarn.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRARN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vsrarn.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSRARN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vssrln.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrln_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vssrln.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrln_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vssrln.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrln_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vssran.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssran_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vssran.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssran_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vssran.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssran_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vssrlrn.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vssrlrn.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vssrlrn.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vssrarn.b.h vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarn_b_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_B_H, vd, vj, vk));
+}
+
+/* Emits the `vssrarn.h.w vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarn_h_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_H_W, vd, vj, vk));
+}
+
+/* Emits the `vssrarn.w.d vd, vj, vk` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarn_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
+{
+    tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_W_D, vd, vj, vk));
+}
+
+/* Emits the `vssrln.bu.h vd, vj, vk` instruction. */
3544
+static void __attribute__((unused))
3545
+tcg_out_opc_vssrln_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3546
+{
3547
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_BU_H, vd, vj, vk));
3548
+}
3549
+
3550
+/* Emits the `vssrln.hu.w vd, vj, vk` instruction. */
3551
+static void __attribute__((unused))
3552
+tcg_out_opc_vssrln_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3553
+{
3554
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_HU_W, vd, vj, vk));
3555
+}
3556
+
3557
+/* Emits the `vssrln.wu.d vd, vj, vk` instruction. */
3558
+static void __attribute__((unused))
3559
+tcg_out_opc_vssrln_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3560
+{
3561
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLN_WU_D, vd, vj, vk));
3562
+}
3563
+
3564
+/* Emits the `vssran.bu.h vd, vj, vk` instruction. */
3565
+static void __attribute__((unused))
3566
+tcg_out_opc_vssran_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3567
+{
3568
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_BU_H, vd, vj, vk));
3569
+}
3570
+
3571
+/* Emits the `vssran.hu.w vd, vj, vk` instruction. */
3572
+static void __attribute__((unused))
3573
+tcg_out_opc_vssran_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3574
+{
3575
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_HU_W, vd, vj, vk));
3576
+}
3577
+
3578
+/* Emits the `vssran.wu.d vd, vj, vk` instruction. */
3579
+static void __attribute__((unused))
3580
+tcg_out_opc_vssran_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3581
+{
3582
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRAN_WU_D, vd, vj, vk));
3583
+}
3584
+
3585
+/* Emits the `vssrlrn.bu.h vd, vj, vk` instruction. */
3586
+static void __attribute__((unused))
3587
+tcg_out_opc_vssrlrn_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3588
+{
3589
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_BU_H, vd, vj, vk));
3590
+}
3591
+
3592
+/* Emits the `vssrlrn.hu.w vd, vj, vk` instruction. */
3593
+static void __attribute__((unused))
3594
+tcg_out_opc_vssrlrn_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3595
+{
3596
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_HU_W, vd, vj, vk));
3597
+}
3598
+
3599
+/* Emits the `vssrlrn.wu.d vd, vj, vk` instruction. */
3600
+static void __attribute__((unused))
3601
+tcg_out_opc_vssrlrn_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3602
+{
3603
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRLRN_WU_D, vd, vj, vk));
3604
+}
3605
+
3606
+/* Emits the `vssrarn.bu.h vd, vj, vk` instruction. */
3607
+static void __attribute__((unused))
3608
+tcg_out_opc_vssrarn_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3609
+{
3610
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_BU_H, vd, vj, vk));
3611
+}
3612
+
3613
+/* Emits the `vssrarn.hu.w vd, vj, vk` instruction. */
3614
+static void __attribute__((unused))
3615
+tcg_out_opc_vssrarn_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3616
+{
3617
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_HU_W, vd, vj, vk));
3618
+}
3619
+
3620
+/* Emits the `vssrarn.wu.d vd, vj, vk` instruction. */
3621
+static void __attribute__((unused))
3622
+tcg_out_opc_vssrarn_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3623
+{
3624
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSSRARN_WU_D, vd, vj, vk));
3625
+}
3626
+
3627
+/* Emits the `vbitclr.b vd, vj, vk` instruction. */
3628
+static void __attribute__((unused))
3629
+tcg_out_opc_vbitclr_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3630
+{
3631
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_B, vd, vj, vk));
3632
+}
3633
+
3634
+/* Emits the `vbitclr.h vd, vj, vk` instruction. */
3635
+static void __attribute__((unused))
3636
+tcg_out_opc_vbitclr_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3637
+{
3638
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_H, vd, vj, vk));
3639
+}
3640
+
3641
+/* Emits the `vbitclr.w vd, vj, vk` instruction. */
3642
+static void __attribute__((unused))
3643
+tcg_out_opc_vbitclr_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3644
+{
3645
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_W, vd, vj, vk));
3646
+}
3647
+
3648
+/* Emits the `vbitclr.d vd, vj, vk` instruction. */
3649
+static void __attribute__((unused))
3650
+tcg_out_opc_vbitclr_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3651
+{
3652
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITCLR_D, vd, vj, vk));
3653
+}
3654
+
3655
+/* Emits the `vbitset.b vd, vj, vk` instruction. */
3656
+static void __attribute__((unused))
3657
+tcg_out_opc_vbitset_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3658
+{
3659
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_B, vd, vj, vk));
3660
+}
3661
+
3662
+/* Emits the `vbitset.h vd, vj, vk` instruction. */
3663
+static void __attribute__((unused))
3664
+tcg_out_opc_vbitset_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3665
+{
3666
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_H, vd, vj, vk));
3667
+}
3668
+
3669
+/* Emits the `vbitset.w vd, vj, vk` instruction. */
3670
+static void __attribute__((unused))
3671
+tcg_out_opc_vbitset_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3672
+{
3673
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_W, vd, vj, vk));
3674
+}
3675
+
3676
+/* Emits the `vbitset.d vd, vj, vk` instruction. */
3677
+static void __attribute__((unused))
3678
+tcg_out_opc_vbitset_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3679
+{
3680
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITSET_D, vd, vj, vk));
3681
+}
3682
+
3683
+/* Emits the `vbitrev.b vd, vj, vk` instruction. */
3684
+static void __attribute__((unused))
3685
+tcg_out_opc_vbitrev_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3686
+{
3687
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_B, vd, vj, vk));
3688
+}
3689
+
3690
+/* Emits the `vbitrev.h vd, vj, vk` instruction. */
3691
+static void __attribute__((unused))
3692
+tcg_out_opc_vbitrev_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3693
+{
3694
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_H, vd, vj, vk));
3695
+}
3696
+
3697
+/* Emits the `vbitrev.w vd, vj, vk` instruction. */
3698
+static void __attribute__((unused))
3699
+tcg_out_opc_vbitrev_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3700
+{
3701
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_W, vd, vj, vk));
3702
+}
3703
+
3704
+/* Emits the `vbitrev.d vd, vj, vk` instruction. */
3705
+static void __attribute__((unused))
3706
+tcg_out_opc_vbitrev_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3707
+{
3708
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VBITREV_D, vd, vj, vk));
3709
+}
3710
+
3711
+/* Emits the `vpackev.b vd, vj, vk` instruction. */
3712
+static void __attribute__((unused))
3713
+tcg_out_opc_vpackev_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3714
+{
3715
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_B, vd, vj, vk));
3716
+}
3717
+
3718
+/* Emits the `vpackev.h vd, vj, vk` instruction. */
3719
+static void __attribute__((unused))
3720
+tcg_out_opc_vpackev_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3721
+{
3722
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_H, vd, vj, vk));
3723
+}
3724
+
3725
+/* Emits the `vpackev.w vd, vj, vk` instruction. */
3726
+static void __attribute__((unused))
3727
+tcg_out_opc_vpackev_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3728
+{
3729
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_W, vd, vj, vk));
3730
+}
3731
+
3732
+/* Emits the `vpackev.d vd, vj, vk` instruction. */
3733
+static void __attribute__((unused))
3734
+tcg_out_opc_vpackev_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3735
+{
3736
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKEV_D, vd, vj, vk));
3737
+}
3738
+
3739
+/* Emits the `vpackod.b vd, vj, vk` instruction. */
3740
+static void __attribute__((unused))
3741
+tcg_out_opc_vpackod_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3742
+{
3743
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_B, vd, vj, vk));
3744
+}
3745
+
3746
+/* Emits the `vpackod.h vd, vj, vk` instruction. */
3747
+static void __attribute__((unused))
3748
+tcg_out_opc_vpackod_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3749
+{
3750
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_H, vd, vj, vk));
3751
+}
3752
+
3753
+/* Emits the `vpackod.w vd, vj, vk` instruction. */
3754
+static void __attribute__((unused))
3755
+tcg_out_opc_vpackod_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3756
+{
3757
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_W, vd, vj, vk));
3758
+}
3759
+
3760
+/* Emits the `vpackod.d vd, vj, vk` instruction. */
3761
+static void __attribute__((unused))
3762
+tcg_out_opc_vpackod_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3763
+{
3764
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPACKOD_D, vd, vj, vk));
3765
+}
3766
+
3767
+/* Emits the `vilvl.b vd, vj, vk` instruction. */
3768
+static void __attribute__((unused))
3769
+tcg_out_opc_vilvl_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3770
+{
3771
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_B, vd, vj, vk));
3772
+}
3773
+
3774
+/* Emits the `vilvl.h vd, vj, vk` instruction. */
3775
+static void __attribute__((unused))
3776
+tcg_out_opc_vilvl_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3777
+{
3778
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_H, vd, vj, vk));
3779
+}
3780
+
3781
+/* Emits the `vilvl.w vd, vj, vk` instruction. */
3782
+static void __attribute__((unused))
3783
+tcg_out_opc_vilvl_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3784
+{
3785
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_W, vd, vj, vk));
3786
+}
3787
+
3788
+/* Emits the `vilvl.d vd, vj, vk` instruction. */
3789
+static void __attribute__((unused))
3790
+tcg_out_opc_vilvl_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3791
+{
3792
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVL_D, vd, vj, vk));
3793
+}
3794
+
3795
+/* Emits the `vilvh.b vd, vj, vk` instruction. */
3796
+static void __attribute__((unused))
3797
+tcg_out_opc_vilvh_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3798
+{
3799
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_B, vd, vj, vk));
3800
+}
3801
+
3802
+/* Emits the `vilvh.h vd, vj, vk` instruction. */
3803
+static void __attribute__((unused))
3804
+tcg_out_opc_vilvh_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3805
+{
3806
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_H, vd, vj, vk));
3807
+}
3808
+
3809
+/* Emits the `vilvh.w vd, vj, vk` instruction. */
3810
+static void __attribute__((unused))
3811
+tcg_out_opc_vilvh_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3812
+{
3813
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_W, vd, vj, vk));
3814
+}
3815
+
3816
+/* Emits the `vilvh.d vd, vj, vk` instruction. */
3817
+static void __attribute__((unused))
3818
+tcg_out_opc_vilvh_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3819
+{
3820
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VILVH_D, vd, vj, vk));
3821
+}
3822
+
3823
+/* Emits the `vpickev.b vd, vj, vk` instruction. */
3824
+static void __attribute__((unused))
3825
+tcg_out_opc_vpickev_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3826
+{
3827
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_B, vd, vj, vk));
3828
+}
3829
+
3830
+/* Emits the `vpickev.h vd, vj, vk` instruction. */
3831
+static void __attribute__((unused))
3832
+tcg_out_opc_vpickev_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3833
+{
3834
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_H, vd, vj, vk));
3835
+}
3836
+
3837
+/* Emits the `vpickev.w vd, vj, vk` instruction. */
3838
+static void __attribute__((unused))
3839
+tcg_out_opc_vpickev_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3840
+{
3841
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_W, vd, vj, vk));
3842
+}
3843
+
3844
+/* Emits the `vpickev.d vd, vj, vk` instruction. */
3845
+static void __attribute__((unused))
3846
+tcg_out_opc_vpickev_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3847
+{
3848
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKEV_D, vd, vj, vk));
3849
+}
3850
+
3851
+/* Emits the `vpickod.b vd, vj, vk` instruction. */
3852
+static void __attribute__((unused))
3853
+tcg_out_opc_vpickod_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3854
+{
3855
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_B, vd, vj, vk));
3856
+}
3857
+
3858
+/* Emits the `vpickod.h vd, vj, vk` instruction. */
3859
+static void __attribute__((unused))
3860
+tcg_out_opc_vpickod_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3861
+{
3862
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_H, vd, vj, vk));
3863
+}
3864
+
3865
+/* Emits the `vpickod.w vd, vj, vk` instruction. */
3866
+static void __attribute__((unused))
3867
+tcg_out_opc_vpickod_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3868
+{
3869
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_W, vd, vj, vk));
3870
+}
3871
+
3872
+/* Emits the `vpickod.d vd, vj, vk` instruction. */
3873
+static void __attribute__((unused))
3874
+tcg_out_opc_vpickod_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3875
+{
3876
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VPICKOD_D, vd, vj, vk));
3877
+}
3878
+
3879
+/* Emits the `vreplve.b vd, vj, k` instruction. */
3880
+static void __attribute__((unused))
3881
+tcg_out_opc_vreplve_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
3882
+{
3883
+ tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_B, vd, vj, k));
3884
+}
3885
+
3886
+/* Emits the `vreplve.h vd, vj, k` instruction. */
3887
+static void __attribute__((unused))
3888
+tcg_out_opc_vreplve_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
3889
+{
3890
+ tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_H, vd, vj, k));
3891
+}
3892
+
3893
+/* Emits the `vreplve.w vd, vj, k` instruction. */
3894
+static void __attribute__((unused))
3895
+tcg_out_opc_vreplve_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
3896
+{
3897
+ tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_W, vd, vj, k));
3898
+}
3899
+
3900
+/* Emits the `vreplve.d vd, vj, k` instruction. */
3901
+static void __attribute__((unused))
3902
+tcg_out_opc_vreplve_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg k)
3903
+{
3904
+ tcg_out32(s, encode_vdvjk_insn(OPC_VREPLVE_D, vd, vj, k));
3905
+}
3906
+
3907
+/* Emits the `vand.v vd, vj, vk` instruction. */
3908
+static void __attribute__((unused))
3909
+tcg_out_opc_vand_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3910
+{
3911
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VAND_V, vd, vj, vk));
3912
+}
3913
+
3914
+/* Emits the `vor.v vd, vj, vk` instruction. */
3915
+static void __attribute__((unused))
3916
+tcg_out_opc_vor_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3917
+{
3918
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VOR_V, vd, vj, vk));
3919
+}
3920
+
3921
+/* Emits the `vxor.v vd, vj, vk` instruction. */
3922
+static void __attribute__((unused))
3923
+tcg_out_opc_vxor_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3924
+{
3925
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VXOR_V, vd, vj, vk));
3926
+}
3927
+
3928
+/* Emits the `vnor.v vd, vj, vk` instruction. */
3929
+static void __attribute__((unused))
3930
+tcg_out_opc_vnor_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3931
+{
3932
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VNOR_V, vd, vj, vk));
3933
+}
3934
+
3935
+/* Emits the `vandn.v vd, vj, vk` instruction. */
3936
+static void __attribute__((unused))
3937
+tcg_out_opc_vandn_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3938
+{
3939
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VANDN_V, vd, vj, vk));
3940
+}
3941
+
3942
+/* Emits the `vorn.v vd, vj, vk` instruction. */
3943
+static void __attribute__((unused))
3944
+tcg_out_opc_vorn_v(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3945
+{
3946
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VORN_V, vd, vj, vk));
3947
+}
3948
+
3949
+/* Emits the `vfrstp.b vd, vj, vk` instruction. */
3950
+static void __attribute__((unused))
3951
+tcg_out_opc_vfrstp_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3952
+{
3953
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFRSTP_B, vd, vj, vk));
3954
+}
3955
+
3956
+/* Emits the `vfrstp.h vd, vj, vk` instruction. */
3957
+static void __attribute__((unused))
3958
+tcg_out_opc_vfrstp_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3959
+{
3960
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFRSTP_H, vd, vj, vk));
3961
+}
3962
+
3963
+/* Emits the `vadd.q vd, vj, vk` instruction. */
3964
+static void __attribute__((unused))
3965
+tcg_out_opc_vadd_q(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3966
+{
3967
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VADD_Q, vd, vj, vk));
3968
+}
3969
+
3970
+/* Emits the `vsub.q vd, vj, vk` instruction. */
3971
+static void __attribute__((unused))
3972
+tcg_out_opc_vsub_q(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3973
+{
3974
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSUB_Q, vd, vj, vk));
3975
+}
3976
+
3977
+/* Emits the `vsigncov.b vd, vj, vk` instruction. */
3978
+static void __attribute__((unused))
3979
+tcg_out_opc_vsigncov_b(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3980
+{
3981
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_B, vd, vj, vk));
3982
+}
3983
+
3984
+/* Emits the `vsigncov.h vd, vj, vk` instruction. */
3985
+static void __attribute__((unused))
3986
+tcg_out_opc_vsigncov_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3987
+{
3988
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_H, vd, vj, vk));
3989
+}
3990
+
3991
+/* Emits the `vsigncov.w vd, vj, vk` instruction. */
3992
+static void __attribute__((unused))
3993
+tcg_out_opc_vsigncov_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
3994
+{
3995
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_W, vd, vj, vk));
3996
+}
3997
+
3998
+/* Emits the `vsigncov.d vd, vj, vk` instruction. */
3999
+static void __attribute__((unused))
4000
+tcg_out_opc_vsigncov_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4001
+{
4002
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSIGNCOV_D, vd, vj, vk));
4003
+}
4004
+
4005
+/* Emits the `vfadd.s vd, vj, vk` instruction. */
4006
+static void __attribute__((unused))
4007
+tcg_out_opc_vfadd_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4008
+{
4009
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFADD_S, vd, vj, vk));
4010
+}
4011
+
4012
+/* Emits the `vfadd.d vd, vj, vk` instruction. */
4013
+static void __attribute__((unused))
4014
+tcg_out_opc_vfadd_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4015
+{
4016
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFADD_D, vd, vj, vk));
4017
+}
4018
+
4019
+/* Emits the `vfsub.s vd, vj, vk` instruction. */
4020
+static void __attribute__((unused))
4021
+tcg_out_opc_vfsub_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4022
+{
4023
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFSUB_S, vd, vj, vk));
4024
+}
4025
+
4026
+/* Emits the `vfsub.d vd, vj, vk` instruction. */
4027
+static void __attribute__((unused))
4028
+tcg_out_opc_vfsub_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4029
+{
4030
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFSUB_D, vd, vj, vk));
4031
+}
4032
+
4033
+/* Emits the `vfmul.s vd, vj, vk` instruction. */
4034
+static void __attribute__((unused))
4035
+tcg_out_opc_vfmul_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4036
+{
4037
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMUL_S, vd, vj, vk));
4038
+}
4039
+
4040
+/* Emits the `vfmul.d vd, vj, vk` instruction. */
4041
+static void __attribute__((unused))
4042
+tcg_out_opc_vfmul_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4043
+{
4044
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMUL_D, vd, vj, vk));
4045
+}
4046
+
4047
+/* Emits the `vfdiv.s vd, vj, vk` instruction. */
4048
+static void __attribute__((unused))
4049
+tcg_out_opc_vfdiv_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4050
+{
4051
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFDIV_S, vd, vj, vk));
4052
+}
4053
+
4054
+/* Emits the `vfdiv.d vd, vj, vk` instruction. */
4055
+static void __attribute__((unused))
4056
+tcg_out_opc_vfdiv_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4057
+{
4058
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFDIV_D, vd, vj, vk));
4059
+}
4060
+
4061
+/* Emits the `vfmax.s vd, vj, vk` instruction. */
4062
+static void __attribute__((unused))
4063
+tcg_out_opc_vfmax_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4064
+{
4065
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAX_S, vd, vj, vk));
4066
+}
4067
+
4068
+/* Emits the `vfmax.d vd, vj, vk` instruction. */
4069
+static void __attribute__((unused))
4070
+tcg_out_opc_vfmax_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4071
+{
4072
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAX_D, vd, vj, vk));
4073
+}
4074
+
4075
+/* Emits the `vfmin.s vd, vj, vk` instruction. */
4076
+static void __attribute__((unused))
4077
+tcg_out_opc_vfmin_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4078
+{
4079
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMIN_S, vd, vj, vk));
4080
+}
4081
+
4082
+/* Emits the `vfmin.d vd, vj, vk` instruction. */
4083
+static void __attribute__((unused))
4084
+tcg_out_opc_vfmin_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4085
+{
4086
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMIN_D, vd, vj, vk));
4087
+}
4088
+
4089
+/* Emits the `vfmaxa.s vd, vj, vk` instruction. */
4090
+static void __attribute__((unused))
4091
+tcg_out_opc_vfmaxa_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4092
+{
4093
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAXA_S, vd, vj, vk));
4094
+}
4095
+
4096
+/* Emits the `vfmaxa.d vd, vj, vk` instruction. */
4097
+static void __attribute__((unused))
4098
+tcg_out_opc_vfmaxa_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4099
+{
4100
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMAXA_D, vd, vj, vk));
4101
+}
4102
+
4103
+/* Emits the `vfmina.s vd, vj, vk` instruction. */
4104
+static void __attribute__((unused))
4105
+tcg_out_opc_vfmina_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4106
+{
4107
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMINA_S, vd, vj, vk));
4108
+}
4109
+
4110
+/* Emits the `vfmina.d vd, vj, vk` instruction. */
4111
+static void __attribute__((unused))
4112
+tcg_out_opc_vfmina_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4113
+{
4114
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFMINA_D, vd, vj, vk));
4115
+}
4116
+
4117
+/* Emits the `vfcvt.h.s vd, vj, vk` instruction. */
4118
+static void __attribute__((unused))
4119
+tcg_out_opc_vfcvt_h_s(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4120
+{
4121
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCVT_H_S, vd, vj, vk));
4122
+}
4123
+
4124
+/* Emits the `vfcvt.s.d vd, vj, vk` instruction. */
4125
+static void __attribute__((unused))
4126
+tcg_out_opc_vfcvt_s_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4127
+{
4128
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFCVT_S_D, vd, vj, vk));
4129
+}
4130
+
4131
+/* Emits the `vffint.s.l vd, vj, vk` instruction. */
4132
+static void __attribute__((unused))
4133
+tcg_out_opc_vffint_s_l(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4134
+{
4135
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFFINT_S_L, vd, vj, vk));
4136
+}
4137
+
4138
+/* Emits the `vftint.w.d vd, vj, vk` instruction. */
4139
+static void __attribute__((unused))
4140
+tcg_out_opc_vftint_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4141
+{
4142
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINT_W_D, vd, vj, vk));
4143
+}
4144
+
4145
+/* Emits the `vftintrm.w.d vd, vj, vk` instruction. */
4146
+static void __attribute__((unused))
4147
+tcg_out_opc_vftintrm_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4148
+{
4149
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRM_W_D, vd, vj, vk));
4150
+}
4151
+
4152
+/* Emits the `vftintrp.w.d vd, vj, vk` instruction. */
4153
+static void __attribute__((unused))
4154
+tcg_out_opc_vftintrp_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4155
+{
4156
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRP_W_D, vd, vj, vk));
4157
+}
4158
+
4159
+/* Emits the `vftintrz.w.d vd, vj, vk` instruction. */
4160
+static void __attribute__((unused))
4161
+tcg_out_opc_vftintrz_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4162
+{
4163
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRZ_W_D, vd, vj, vk));
4164
+}
4165
+
4166
+/* Emits the `vftintrne.w.d vd, vj, vk` instruction. */
4167
+static void __attribute__((unused))
4168
+tcg_out_opc_vftintrne_w_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4169
+{
4170
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VFTINTRNE_W_D, vd, vj, vk));
4171
+}
4172
+
4173
+/* Emits the `vshuf.h vd, vj, vk` instruction. */
4174
+static void __attribute__((unused))
4175
+tcg_out_opc_vshuf_h(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4176
+{
4177
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSHUF_H, vd, vj, vk));
4178
+}
4179
+
4180
+/* Emits the `vshuf.w vd, vj, vk` instruction. */
4181
+static void __attribute__((unused))
4182
+tcg_out_opc_vshuf_w(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4183
+{
4184
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSHUF_W, vd, vj, vk));
4185
+}
4186
+
4187
+/* Emits the `vshuf.d vd, vj, vk` instruction. */
4188
+static void __attribute__((unused))
4189
+tcg_out_opc_vshuf_d(TCGContext *s, TCGReg vd, TCGReg vj, TCGReg vk)
4190
+{
4191
+ tcg_out32(s, encode_vdvjvk_insn(OPC_VSHUF_D, vd, vj, vk));
4192
+}
4193
+
4194
+/* Emits the `vseqi.b vd, vj, sk5` instruction. */
4195
+static void __attribute__((unused))
4196
+tcg_out_opc_vseqi_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4197
+{
4198
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_B, vd, vj, sk5));
4199
+}
4200
+
4201
+/* Emits the `vseqi.h vd, vj, sk5` instruction. */
4202
+static void __attribute__((unused))
4203
+tcg_out_opc_vseqi_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4204
+{
4205
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_H, vd, vj, sk5));
4206
+}
4207
+
4208
+/* Emits the `vseqi.w vd, vj, sk5` instruction. */
4209
+static void __attribute__((unused))
4210
+tcg_out_opc_vseqi_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4211
+{
4212
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_W, vd, vj, sk5));
4213
+}
4214
+
4215
+/* Emits the `vseqi.d vd, vj, sk5` instruction. */
4216
+static void __attribute__((unused))
4217
+tcg_out_opc_vseqi_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4218
+{
4219
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSEQI_D, vd, vj, sk5));
4220
+}
4221
+
4222
+/* Emits the `vslei.b vd, vj, sk5` instruction. */
4223
+static void __attribute__((unused))
4224
+tcg_out_opc_vslei_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4225
+{
4226
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_B, vd, vj, sk5));
4227
+}
4228
+
4229
+/* Emits the `vslei.h vd, vj, sk5` instruction. */
4230
+static void __attribute__((unused))
4231
+tcg_out_opc_vslei_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4232
+{
4233
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_H, vd, vj, sk5));
4234
+}
4235
+
4236
+/* Emits the `vslei.w vd, vj, sk5` instruction. */
4237
+static void __attribute__((unused))
4238
+tcg_out_opc_vslei_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4239
+{
4240
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_W, vd, vj, sk5));
4241
+}
4242
+
4243
+/* Emits the `vslei.d vd, vj, sk5` instruction. */
4244
+static void __attribute__((unused))
4245
+tcg_out_opc_vslei_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4246
+{
4247
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLEI_D, vd, vj, sk5));
4248
+}
4249
+
4250
+/* Emits the `vslei.bu vd, vj, uk5` instruction. */
4251
+static void __attribute__((unused))
4252
+tcg_out_opc_vslei_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4253
+{
4254
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_BU, vd, vj, uk5));
4255
+}
4256
+
4257
+/* Emits the `vslei.hu vd, vj, uk5` instruction. */
4258
+static void __attribute__((unused))
4259
+tcg_out_opc_vslei_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4260
+{
4261
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_HU, vd, vj, uk5));
4262
+}
4263
+
4264
+/* Emits the `vslei.wu vd, vj, uk5` instruction. */
4265
+static void __attribute__((unused))
4266
+tcg_out_opc_vslei_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4267
+{
4268
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_WU, vd, vj, uk5));
4269
+}
4270
+
4271
+/* Emits the `vslei.du vd, vj, uk5` instruction. */
4272
+static void __attribute__((unused))
4273
+tcg_out_opc_vslei_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4274
+{
4275
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLEI_DU, vd, vj, uk5));
4276
+}
4277
+
4278
+/* Emits the `vslti.b vd, vj, sk5` instruction. */
4279
+static void __attribute__((unused))
4280
+tcg_out_opc_vslti_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4281
+{
4282
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_B, vd, vj, sk5));
4283
+}
4284
+
4285
+/* Emits the `vslti.h vd, vj, sk5` instruction. */
4286
+static void __attribute__((unused))
4287
+tcg_out_opc_vslti_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4288
+{
4289
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_H, vd, vj, sk5));
4290
+}
4291
+
4292
+/* Emits the `vslti.w vd, vj, sk5` instruction. */
4293
+static void __attribute__((unused))
4294
+tcg_out_opc_vslti_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4295
+{
4296
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_W, vd, vj, sk5));
4297
+}
4298
+
4299
+/* Emits the `vslti.d vd, vj, sk5` instruction. */
4300
+static void __attribute__((unused))
4301
+tcg_out_opc_vslti_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4302
+{
4303
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VSLTI_D, vd, vj, sk5));
4304
+}
4305
+
4306
+/* Emits the `vslti.bu vd, vj, uk5` instruction. */
4307
+static void __attribute__((unused))
4308
+tcg_out_opc_vslti_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4309
+{
4310
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_BU, vd, vj, uk5));
4311
+}
4312
+
4313
+/* Emits the `vslti.hu vd, vj, uk5` instruction. */
4314
+static void __attribute__((unused))
4315
+tcg_out_opc_vslti_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4316
+{
4317
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_HU, vd, vj, uk5));
4318
+}
4319
+
4320
+/* Emits the `vslti.wu vd, vj, uk5` instruction. */
4321
+static void __attribute__((unused))
4322
+tcg_out_opc_vslti_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4323
+{
4324
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_WU, vd, vj, uk5));
4325
+}
4326
+
4327
+/* Emits the `vslti.du vd, vj, uk5` instruction. */
4328
+static void __attribute__((unused))
4329
+tcg_out_opc_vslti_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4330
+{
4331
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLTI_DU, vd, vj, uk5));
4332
+}
4333
+
4334
+/* Emits the `vaddi.bu vd, vj, uk5` instruction. */
4335
+static void __attribute__((unused))
4336
+tcg_out_opc_vaddi_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4337
+{
4338
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_BU, vd, vj, uk5));
4339
+}
4340
+
4341
+/* Emits the `vaddi.hu vd, vj, uk5` instruction. */
4342
+static void __attribute__((unused))
4343
+tcg_out_opc_vaddi_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4344
+{
4345
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_HU, vd, vj, uk5));
4346
+}
4347
+
4348
+/* Emits the `vaddi.wu vd, vj, uk5` instruction. */
4349
+static void __attribute__((unused))
4350
+tcg_out_opc_vaddi_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4351
+{
4352
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_WU, vd, vj, uk5));
4353
+}
4354
+
4355
+/* Emits the `vaddi.du vd, vj, uk5` instruction. */
4356
+static void __attribute__((unused))
4357
+tcg_out_opc_vaddi_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4358
+{
4359
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VADDI_DU, vd, vj, uk5));
4360
+}
4361
+
4362
+/* Emits the `vsubi.bu vd, vj, uk5` instruction. */
4363
+static void __attribute__((unused))
4364
+tcg_out_opc_vsubi_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4365
+{
4366
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_BU, vd, vj, uk5));
4367
+}
4368
+
4369
+/* Emits the `vsubi.hu vd, vj, uk5` instruction. */
4370
+static void __attribute__((unused))
4371
+tcg_out_opc_vsubi_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4372
+{
4373
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_HU, vd, vj, uk5));
4374
+}
4375
+
4376
+/* Emits the `vsubi.wu vd, vj, uk5` instruction. */
4377
+static void __attribute__((unused))
4378
+tcg_out_opc_vsubi_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4379
+{
4380
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_WU, vd, vj, uk5));
4381
+}
4382
+
4383
+/* Emits the `vsubi.du vd, vj, uk5` instruction. */
4384
+static void __attribute__((unused))
4385
+tcg_out_opc_vsubi_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4386
+{
4387
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VSUBI_DU, vd, vj, uk5));
4388
+}
4389
+
4390
+/* Emits the `vbsll.v vd, vj, uk5` instruction. */
4391
+static void __attribute__((unused))
4392
+tcg_out_opc_vbsll_v(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4393
+{
4394
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VBSLL_V, vd, vj, uk5));
4395
+}
4396
+
4397
+/* Emits the `vbsrl.v vd, vj, uk5` instruction. */
4398
+static void __attribute__((unused))
4399
+tcg_out_opc_vbsrl_v(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4400
+{
4401
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VBSRL_V, vd, vj, uk5));
4402
+}
4403
+
4404
+/* Emits the `vmaxi.b vd, vj, sk5` instruction. */
4405
+static void __attribute__((unused))
4406
+tcg_out_opc_vmaxi_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4407
+{
4408
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_B, vd, vj, sk5));
4409
+}
4410
+
4411
+/* Emits the `vmaxi.h vd, vj, sk5` instruction. */
4412
+static void __attribute__((unused))
4413
+tcg_out_opc_vmaxi_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4414
+{
4415
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_H, vd, vj, sk5));
4416
+}
4417
+
4418
+/* Emits the `vmaxi.w vd, vj, sk5` instruction. */
4419
+static void __attribute__((unused))
4420
+tcg_out_opc_vmaxi_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4421
+{
4422
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_W, vd, vj, sk5));
4423
+}
4424
+
4425
+/* Emits the `vmaxi.d vd, vj, sk5` instruction. */
4426
+static void __attribute__((unused))
4427
+tcg_out_opc_vmaxi_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4428
+{
4429
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMAXI_D, vd, vj, sk5));
4430
+}
4431
+
4432
+/* Emits the `vmini.b vd, vj, sk5` instruction. */
4433
+static void __attribute__((unused))
4434
+tcg_out_opc_vmini_b(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4435
+{
4436
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_B, vd, vj, sk5));
4437
+}
4438
+
4439
+/* Emits the `vmini.h vd, vj, sk5` instruction. */
4440
+static void __attribute__((unused))
4441
+tcg_out_opc_vmini_h(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4442
+{
4443
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_H, vd, vj, sk5));
4444
+}
4445
+
4446
+/* Emits the `vmini.w vd, vj, sk5` instruction. */
4447
+static void __attribute__((unused))
4448
+tcg_out_opc_vmini_w(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4449
+{
4450
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_W, vd, vj, sk5));
4451
+}
4452
+
4453
+/* Emits the `vmini.d vd, vj, sk5` instruction. */
4454
+static void __attribute__((unused))
4455
+tcg_out_opc_vmini_d(TCGContext *s, TCGReg vd, TCGReg vj, int32_t sk5)
4456
+{
4457
+ tcg_out32(s, encode_vdvjsk5_insn(OPC_VMINI_D, vd, vj, sk5));
4458
+}
4459
+
4460
+/* Emits the `vmaxi.bu vd, vj, uk5` instruction. */
4461
+static void __attribute__((unused))
4462
+tcg_out_opc_vmaxi_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4463
+{
4464
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_BU, vd, vj, uk5));
4465
+}
4466
+
4467
+/* Emits the `vmaxi.hu vd, vj, uk5` instruction. */
4468
+static void __attribute__((unused))
4469
+tcg_out_opc_vmaxi_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4470
+{
4471
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_HU, vd, vj, uk5));
4472
+}
4473
+
4474
+/* Emits the `vmaxi.wu vd, vj, uk5` instruction. */
4475
+static void __attribute__((unused))
4476
+tcg_out_opc_vmaxi_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4477
+{
4478
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_WU, vd, vj, uk5));
4479
+}
4480
+
4481
+/* Emits the `vmaxi.du vd, vj, uk5` instruction. */
4482
+static void __attribute__((unused))
4483
+tcg_out_opc_vmaxi_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4484
+{
4485
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMAXI_DU, vd, vj, uk5));
4486
+}
4487
+
4488
+/* Emits the `vmini.bu vd, vj, uk5` instruction. */
4489
+static void __attribute__((unused))
4490
+tcg_out_opc_vmini_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4491
+{
4492
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_BU, vd, vj, uk5));
4493
+}
4494
+
4495
+/* Emits the `vmini.hu vd, vj, uk5` instruction. */
4496
+static void __attribute__((unused))
4497
+tcg_out_opc_vmini_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4498
+{
4499
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_HU, vd, vj, uk5));
4500
+}
4501
+
4502
+/* Emits the `vmini.wu vd, vj, uk5` instruction. */
4503
+static void __attribute__((unused))
4504
+tcg_out_opc_vmini_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4505
+{
4506
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_WU, vd, vj, uk5));
4507
+}
4508
+
4509
+/* Emits the `vmini.du vd, vj, uk5` instruction. */
4510
+static void __attribute__((unused))
4511
+tcg_out_opc_vmini_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4512
+{
4513
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VMINI_DU, vd, vj, uk5));
4514
+}
4515
+
4516
+/* Emits the `vfrstpi.b vd, vj, uk5` instruction. */
4517
+static void __attribute__((unused))
4518
+tcg_out_opc_vfrstpi_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4519
+{
4520
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VFRSTPI_B, vd, vj, uk5));
4521
+}
4522
+
4523
+/* Emits the `vfrstpi.h vd, vj, uk5` instruction. */
4524
+static void __attribute__((unused))
4525
+tcg_out_opc_vfrstpi_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
4526
+{
4527
+ tcg_out32(s, encode_vdvjuk5_insn(OPC_VFRSTPI_H, vd, vj, uk5));
4528
+}
4529
+
4530
+/* Emits the `vclo.b vd, vj` instruction. */
4531
+static void __attribute__((unused))
4532
+tcg_out_opc_vclo_b(TCGContext *s, TCGReg vd, TCGReg vj)
4533
+{
4534
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLO_B, vd, vj));
4535
+}
4536
+
4537
+/* Emits the `vclo.h vd, vj` instruction. */
4538
+static void __attribute__((unused))
4539
+tcg_out_opc_vclo_h(TCGContext *s, TCGReg vd, TCGReg vj)
4540
+{
4541
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLO_H, vd, vj));
4542
+}
4543
+
4544
+/* Emits the `vclo.w vd, vj` instruction. */
4545
+static void __attribute__((unused))
4546
+tcg_out_opc_vclo_w(TCGContext *s, TCGReg vd, TCGReg vj)
4547
+{
4548
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLO_W, vd, vj));
4549
+}
4550
+
4551
+/* Emits the `vclo.d vd, vj` instruction. */
4552
+static void __attribute__((unused))
4553
+tcg_out_opc_vclo_d(TCGContext *s, TCGReg vd, TCGReg vj)
4554
+{
4555
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLO_D, vd, vj));
4556
+}
4557
+
4558
+/* Emits the `vclz.b vd, vj` instruction. */
4559
+static void __attribute__((unused))
4560
+tcg_out_opc_vclz_b(TCGContext *s, TCGReg vd, TCGReg vj)
4561
+{
4562
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_B, vd, vj));
4563
+}
4564
+
4565
+/* Emits the `vclz.h vd, vj` instruction. */
4566
+static void __attribute__((unused))
4567
+tcg_out_opc_vclz_h(TCGContext *s, TCGReg vd, TCGReg vj)
4568
+{
4569
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_H, vd, vj));
4570
+}
4571
+
4572
+/* Emits the `vclz.w vd, vj` instruction. */
4573
+static void __attribute__((unused))
4574
+tcg_out_opc_vclz_w(TCGContext *s, TCGReg vd, TCGReg vj)
4575
+{
4576
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_W, vd, vj));
4577
+}
4578
+
4579
+/* Emits the `vclz.d vd, vj` instruction. */
4580
+static void __attribute__((unused))
4581
+tcg_out_opc_vclz_d(TCGContext *s, TCGReg vd, TCGReg vj)
4582
+{
4583
+ tcg_out32(s, encode_vdvj_insn(OPC_VCLZ_D, vd, vj));
4584
+}
4585
+
4586
+/* Emits the `vpcnt.b vd, vj` instruction. */
4587
+static void __attribute__((unused))
4588
+tcg_out_opc_vpcnt_b(TCGContext *s, TCGReg vd, TCGReg vj)
4589
+{
4590
+ tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_B, vd, vj));
4591
+}
4592
+
4593
+/* Emits the `vpcnt.h vd, vj` instruction. */
4594
+static void __attribute__((unused))
4595
+tcg_out_opc_vpcnt_h(TCGContext *s, TCGReg vd, TCGReg vj)
4596
+{
4597
+ tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_H, vd, vj));
4598
+}
4599
+
4600
+/* Emits the `vpcnt.w vd, vj` instruction. */
4601
+static void __attribute__((unused))
4602
+tcg_out_opc_vpcnt_w(TCGContext *s, TCGReg vd, TCGReg vj)
4603
+{
4604
+ tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_W, vd, vj));
4605
+}
4606
+
4607
+/* Emits the `vpcnt.d vd, vj` instruction. */
4608
+static void __attribute__((unused))
4609
+tcg_out_opc_vpcnt_d(TCGContext *s, TCGReg vd, TCGReg vj)
4610
+{
4611
+ tcg_out32(s, encode_vdvj_insn(OPC_VPCNT_D, vd, vj));
4612
+}
4613
+
4614
+/* Emits the `vneg.b vd, vj` instruction. */
4615
+static void __attribute__((unused))
4616
+tcg_out_opc_vneg_b(TCGContext *s, TCGReg vd, TCGReg vj)
4617
+{
4618
+ tcg_out32(s, encode_vdvj_insn(OPC_VNEG_B, vd, vj));
4619
+}
4620
+
4621
+/* Emits the `vneg.h vd, vj` instruction. */
4622
+static void __attribute__((unused))
4623
+tcg_out_opc_vneg_h(TCGContext *s, TCGReg vd, TCGReg vj)
4624
+{
4625
+ tcg_out32(s, encode_vdvj_insn(OPC_VNEG_H, vd, vj));
4626
+}
4627
+
4628
+/* Emits the `vneg.w vd, vj` instruction. */
4629
+static void __attribute__((unused))
4630
+tcg_out_opc_vneg_w(TCGContext *s, TCGReg vd, TCGReg vj)
4631
+{
4632
+ tcg_out32(s, encode_vdvj_insn(OPC_VNEG_W, vd, vj));
4633
+}
4634
+
4635
+/* Emits the `vneg.d vd, vj` instruction. */
4636
+static void __attribute__((unused))
4637
+tcg_out_opc_vneg_d(TCGContext *s, TCGReg vd, TCGReg vj)
4638
+{
4639
+ tcg_out32(s, encode_vdvj_insn(OPC_VNEG_D, vd, vj));
4640
+}
4641
+
4642
+/* Emits the `vmskltz.b vd, vj` instruction. */
4643
+static void __attribute__((unused))
4644
+tcg_out_opc_vmskltz_b(TCGContext *s, TCGReg vd, TCGReg vj)
4645
+{
4646
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_B, vd, vj));
4647
+}
4648
+
4649
+/* Emits the `vmskltz.h vd, vj` instruction. */
4650
+static void __attribute__((unused))
4651
+tcg_out_opc_vmskltz_h(TCGContext *s, TCGReg vd, TCGReg vj)
4652
+{
4653
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_H, vd, vj));
4654
+}
4655
+
4656
+/* Emits the `vmskltz.w vd, vj` instruction. */
4657
+static void __attribute__((unused))
4658
+tcg_out_opc_vmskltz_w(TCGContext *s, TCGReg vd, TCGReg vj)
4659
+{
4660
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_W, vd, vj));
4661
+}
4662
+
4663
+/* Emits the `vmskltz.d vd, vj` instruction. */
4664
+static void __attribute__((unused))
4665
+tcg_out_opc_vmskltz_d(TCGContext *s, TCGReg vd, TCGReg vj)
4666
+{
4667
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKLTZ_D, vd, vj));
4668
+}
4669
+
4670
+/* Emits the `vmskgez.b vd, vj` instruction. */
4671
+static void __attribute__((unused))
4672
+tcg_out_opc_vmskgez_b(TCGContext *s, TCGReg vd, TCGReg vj)
4673
+{
4674
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKGEZ_B, vd, vj));
4675
+}
4676
+
4677
+/* Emits the `vmsknz.b vd, vj` instruction. */
4678
+static void __attribute__((unused))
4679
+tcg_out_opc_vmsknz_b(TCGContext *s, TCGReg vd, TCGReg vj)
4680
+{
4681
+ tcg_out32(s, encode_vdvj_insn(OPC_VMSKNZ_B, vd, vj));
4682
+}
4683
+
4684
+/* Emits the `vseteqz.v cd, vj` instruction. */
4685
+static void __attribute__((unused))
4686
+tcg_out_opc_vseteqz_v(TCGContext *s, TCGReg cd, TCGReg vj)
4687
+{
4688
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETEQZ_V, cd, vj));
4689
+}
4690
+
4691
+/* Emits the `vsetnez.v cd, vj` instruction. */
4692
+static void __attribute__((unused))
4693
+tcg_out_opc_vsetnez_v(TCGContext *s, TCGReg cd, TCGReg vj)
4694
+{
4695
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETNEZ_V, cd, vj));
4696
+}
4697
+
4698
+/* Emits the `vsetanyeqz.b cd, vj` instruction. */
4699
+static void __attribute__((unused))
4700
+tcg_out_opc_vsetanyeqz_b(TCGContext *s, TCGReg cd, TCGReg vj)
4701
+{
4702
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_B, cd, vj));
4703
+}
4704
+
4705
+/* Emits the `vsetanyeqz.h cd, vj` instruction. */
4706
+static void __attribute__((unused))
4707
+tcg_out_opc_vsetanyeqz_h(TCGContext *s, TCGReg cd, TCGReg vj)
4708
+{
4709
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_H, cd, vj));
4710
+}
4711
+
4712
+/* Emits the `vsetanyeqz.w cd, vj` instruction. */
4713
+static void __attribute__((unused))
4714
+tcg_out_opc_vsetanyeqz_w(TCGContext *s, TCGReg cd, TCGReg vj)
4715
+{
4716
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_W, cd, vj));
4717
+}
4718
+
4719
+/* Emits the `vsetanyeqz.d cd, vj` instruction. */
4720
+static void __attribute__((unused))
4721
+tcg_out_opc_vsetanyeqz_d(TCGContext *s, TCGReg cd, TCGReg vj)
4722
+{
4723
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETANYEQZ_D, cd, vj));
4724
+}
4725
+
4726
+/* Emits the `vsetallnez.b cd, vj` instruction. */
4727
+static void __attribute__((unused))
4728
+tcg_out_opc_vsetallnez_b(TCGContext *s, TCGReg cd, TCGReg vj)
4729
+{
4730
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_B, cd, vj));
4731
+}
4732
+
4733
+/* Emits the `vsetallnez.h cd, vj` instruction. */
4734
+static void __attribute__((unused))
4735
+tcg_out_opc_vsetallnez_h(TCGContext *s, TCGReg cd, TCGReg vj)
4736
+{
4737
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_H, cd, vj));
4738
+}
4739
+
4740
+/* Emits the `vsetallnez.w cd, vj` instruction. */
4741
+static void __attribute__((unused))
4742
+tcg_out_opc_vsetallnez_w(TCGContext *s, TCGReg cd, TCGReg vj)
4743
+{
4744
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_W, cd, vj));
4745
+}
4746
+
4747
+/* Emits the `vsetallnez.d cd, vj` instruction. */
4748
+static void __attribute__((unused))
4749
+tcg_out_opc_vsetallnez_d(TCGContext *s, TCGReg cd, TCGReg vj)
4750
+{
4751
+ tcg_out32(s, encode_cdvj_insn(OPC_VSETALLNEZ_D, cd, vj));
4752
+}
4753
+
4754
+/* Emits the `vflogb.s vd, vj` instruction. */
4755
+static void __attribute__((unused))
4756
+tcg_out_opc_vflogb_s(TCGContext *s, TCGReg vd, TCGReg vj)
4757
+{
4758
+ tcg_out32(s, encode_vdvj_insn(OPC_VFLOGB_S, vd, vj));
4759
+}
4760
+
4761
+/* Emits the `vflogb.d vd, vj` instruction. */
4762
+static void __attribute__((unused))
4763
+tcg_out_opc_vflogb_d(TCGContext *s, TCGReg vd, TCGReg vj)
4764
+{
4765
+ tcg_out32(s, encode_vdvj_insn(OPC_VFLOGB_D, vd, vj));
4766
+}
4767
+
4768
+/* Emits the `vfclass.s vd, vj` instruction. */
4769
+static void __attribute__((unused))
4770
+tcg_out_opc_vfclass_s(TCGContext *s, TCGReg vd, TCGReg vj)
4771
+{
4772
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCLASS_S, vd, vj));
4773
+}
4774
+
4775
+/* Emits the `vfclass.d vd, vj` instruction. */
4776
+static void __attribute__((unused))
4777
+tcg_out_opc_vfclass_d(TCGContext *s, TCGReg vd, TCGReg vj)
4778
+{
4779
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCLASS_D, vd, vj));
4780
+}
4781
+
4782
+/* Emits the `vfsqrt.s vd, vj` instruction. */
4783
+static void __attribute__((unused))
4784
+tcg_out_opc_vfsqrt_s(TCGContext *s, TCGReg vd, TCGReg vj)
4785
+{
4786
+ tcg_out32(s, encode_vdvj_insn(OPC_VFSQRT_S, vd, vj));
4787
+}
4788
+
4789
+/* Emits the `vfsqrt.d vd, vj` instruction. */
4790
+static void __attribute__((unused))
4791
+tcg_out_opc_vfsqrt_d(TCGContext *s, TCGReg vd, TCGReg vj)
4792
+{
4793
+ tcg_out32(s, encode_vdvj_insn(OPC_VFSQRT_D, vd, vj));
4794
+}
4795
+
4796
+/* Emits the `vfrecip.s vd, vj` instruction. */
4797
+static void __attribute__((unused))
4798
+tcg_out_opc_vfrecip_s(TCGContext *s, TCGReg vd, TCGReg vj)
4799
+{
4800
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRECIP_S, vd, vj));
4801
+}
4802
+
4803
+/* Emits the `vfrecip.d vd, vj` instruction. */
4804
+static void __attribute__((unused))
4805
+tcg_out_opc_vfrecip_d(TCGContext *s, TCGReg vd, TCGReg vj)
4806
+{
4807
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRECIP_D, vd, vj));
4808
+}
4809
+
4810
+/* Emits the `vfrsqrt.s vd, vj` instruction. */
4811
+static void __attribute__((unused))
4812
+tcg_out_opc_vfrsqrt_s(TCGContext *s, TCGReg vd, TCGReg vj)
4813
+{
4814
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRSQRT_S, vd, vj));
4815
+}
4816
+
4817
+/* Emits the `vfrsqrt.d vd, vj` instruction. */
4818
+static void __attribute__((unused))
4819
+tcg_out_opc_vfrsqrt_d(TCGContext *s, TCGReg vd, TCGReg vj)
4820
+{
4821
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRSQRT_D, vd, vj));
4822
+}
4823
+
4824
+/* Emits the `vfrint.s vd, vj` instruction. */
4825
+static void __attribute__((unused))
4826
+tcg_out_opc_vfrint_s(TCGContext *s, TCGReg vd, TCGReg vj)
4827
+{
4828
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINT_S, vd, vj));
4829
+}
4830
+
4831
+/* Emits the `vfrint.d vd, vj` instruction. */
4832
+static void __attribute__((unused))
4833
+tcg_out_opc_vfrint_d(TCGContext *s, TCGReg vd, TCGReg vj)
4834
+{
4835
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINT_D, vd, vj));
4836
+}
4837
+
4838
+/* Emits the `vfrintrm.s vd, vj` instruction. */
4839
+static void __attribute__((unused))
4840
+tcg_out_opc_vfrintrm_s(TCGContext *s, TCGReg vd, TCGReg vj)
4841
+{
4842
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRM_S, vd, vj));
4843
+}
4844
+
4845
+/* Emits the `vfrintrm.d vd, vj` instruction. */
4846
+static void __attribute__((unused))
4847
+tcg_out_opc_vfrintrm_d(TCGContext *s, TCGReg vd, TCGReg vj)
4848
+{
4849
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRM_D, vd, vj));
4850
+}
4851
+
4852
+/* Emits the `vfrintrp.s vd, vj` instruction. */
4853
+static void __attribute__((unused))
4854
+tcg_out_opc_vfrintrp_s(TCGContext *s, TCGReg vd, TCGReg vj)
4855
+{
4856
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRP_S, vd, vj));
4857
+}
4858
+
4859
+/* Emits the `vfrintrp.d vd, vj` instruction. */
4860
+static void __attribute__((unused))
4861
+tcg_out_opc_vfrintrp_d(TCGContext *s, TCGReg vd, TCGReg vj)
4862
+{
4863
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRP_D, vd, vj));
4864
+}
4865
+
4866
+/* Emits the `vfrintrz.s vd, vj` instruction. */
4867
+static void __attribute__((unused))
4868
+tcg_out_opc_vfrintrz_s(TCGContext *s, TCGReg vd, TCGReg vj)
4869
+{
4870
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRZ_S, vd, vj));
4871
+}
4872
+
4873
+/* Emits the `vfrintrz.d vd, vj` instruction. */
4874
+static void __attribute__((unused))
4875
+tcg_out_opc_vfrintrz_d(TCGContext *s, TCGReg vd, TCGReg vj)
4876
+{
4877
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRZ_D, vd, vj));
4878
+}
4879
+
4880
+/* Emits the `vfrintrne.s vd, vj` instruction. */
4881
+static void __attribute__((unused))
4882
+tcg_out_opc_vfrintrne_s(TCGContext *s, TCGReg vd, TCGReg vj)
4883
+{
4884
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRNE_S, vd, vj));
4885
+}
4886
+
4887
+/* Emits the `vfrintrne.d vd, vj` instruction. */
4888
+static void __attribute__((unused))
4889
+tcg_out_opc_vfrintrne_d(TCGContext *s, TCGReg vd, TCGReg vj)
4890
+{
4891
+ tcg_out32(s, encode_vdvj_insn(OPC_VFRINTRNE_D, vd, vj));
4892
+}
4893
+
4894
+/* Emits the `vfcvtl.s.h vd, vj` instruction. */
4895
+static void __attribute__((unused))
4896
+tcg_out_opc_vfcvtl_s_h(TCGContext *s, TCGReg vd, TCGReg vj)
4897
+{
4898
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTL_S_H, vd, vj));
4899
+}
4900
+
4901
+/* Emits the `vfcvth.s.h vd, vj` instruction. */
4902
+static void __attribute__((unused))
4903
+tcg_out_opc_vfcvth_s_h(TCGContext *s, TCGReg vd, TCGReg vj)
4904
+{
4905
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTH_S_H, vd, vj));
4906
+}
4907
+
4908
+/* Emits the `vfcvtl.d.s vd, vj` instruction. */
4909
+static void __attribute__((unused))
4910
+tcg_out_opc_vfcvtl_d_s(TCGContext *s, TCGReg vd, TCGReg vj)
4911
+{
4912
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTL_D_S, vd, vj));
4913
+}
4914
+
4915
+/* Emits the `vfcvth.d.s vd, vj` instruction. */
4916
+static void __attribute__((unused))
4917
+tcg_out_opc_vfcvth_d_s(TCGContext *s, TCGReg vd, TCGReg vj)
4918
+{
4919
+ tcg_out32(s, encode_vdvj_insn(OPC_VFCVTH_D_S, vd, vj));
4920
+}
4921
+
4922
+/* Emits the `vffint.s.w vd, vj` instruction. */
4923
+static void __attribute__((unused))
4924
+tcg_out_opc_vffint_s_w(TCGContext *s, TCGReg vd, TCGReg vj)
4925
+{
4926
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_S_W, vd, vj));
4927
+}
4928
+
4929
+/* Emits the `vffint.s.wu vd, vj` instruction. */
4930
+static void __attribute__((unused))
4931
+tcg_out_opc_vffint_s_wu(TCGContext *s, TCGReg vd, TCGReg vj)
4932
+{
4933
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_S_WU, vd, vj));
4934
+}
4935
+
4936
+/* Emits the `vffint.d.l vd, vj` instruction. */
4937
+static void __attribute__((unused))
4938
+tcg_out_opc_vffint_d_l(TCGContext *s, TCGReg vd, TCGReg vj)
4939
+{
4940
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_D_L, vd, vj));
4941
+}
4942
+
4943
+/* Emits the `vffint.d.lu vd, vj` instruction. */
4944
+static void __attribute__((unused))
4945
+tcg_out_opc_vffint_d_lu(TCGContext *s, TCGReg vd, TCGReg vj)
4946
+{
4947
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINT_D_LU, vd, vj));
4948
+}
4949
+
4950
+/* Emits the `vffintl.d.w vd, vj` instruction. */
4951
+static void __attribute__((unused))
4952
+tcg_out_opc_vffintl_d_w(TCGContext *s, TCGReg vd, TCGReg vj)
4953
+{
4954
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINTL_D_W, vd, vj));
4955
+}
4956
+
4957
+/* Emits the `vffinth.d.w vd, vj` instruction. */
4958
+static void __attribute__((unused))
4959
+tcg_out_opc_vffinth_d_w(TCGContext *s, TCGReg vd, TCGReg vj)
4960
+{
4961
+ tcg_out32(s, encode_vdvj_insn(OPC_VFFINTH_D_W, vd, vj));
4962
+}
4963
+
4964
+/* Emits the `vftint.w.s vd, vj` instruction. */
4965
+static void __attribute__((unused))
4966
+tcg_out_opc_vftint_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
4967
+{
4968
+ tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_W_S, vd, vj));
4969
+}
+
+/* Emits the `vftint.l.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftint_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_L_D, vd, vj));
+}
+
+/* Emits the `vftintrm.w.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrm_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRM_W_S, vd, vj));
+}
+
+/* Emits the `vftintrm.l.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrm_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRM_L_D, vd, vj));
+}
+
+/* Emits the `vftintrp.w.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrp_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRP_W_S, vd, vj));
+}
+
+/* Emits the `vftintrp.l.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrp_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRP_L_D, vd, vj));
+}
+
+/* Emits the `vftintrz.w.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrz_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_W_S, vd, vj));
+}
+
+/* Emits the `vftintrz.l.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrz_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_L_D, vd, vj));
+}
+
+/* Emits the `vftintrne.w.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrne_w_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNE_W_S, vd, vj));
+}
+
+/* Emits the `vftintrne.l.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrne_l_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNE_L_D, vd, vj));
+}
+
+/* Emits the `vftint.wu.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftint_wu_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_WU_S, vd, vj));
+}
+
+/* Emits the `vftint.lu.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftint_lu_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINT_LU_D, vd, vj));
+}
+
+/* Emits the `vftintrz.wu.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrz_wu_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_WU_S, vd, vj));
+}
+
+/* Emits the `vftintrz.lu.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrz_lu_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZ_LU_D, vd, vj));
+}
+
+/* Emits the `vftintl.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintl_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTL_L_S, vd, vj));
+}
+
+/* Emits the `vftinth.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftinth_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTH_L_S, vd, vj));
+}
+
+/* Emits the `vftintrml.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrml_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRML_L_S, vd, vj));
+}
+
+/* Emits the `vftintrmh.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrmh_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRMH_L_S, vd, vj));
+}
+
+/* Emits the `vftintrpl.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrpl_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRPL_L_S, vd, vj));
+}
+
+/* Emits the `vftintrph.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrph_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRPH_L_S, vd, vj));
+}
+
+/* Emits the `vftintrzl.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrzl_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZL_L_S, vd, vj));
+}
+
+/* Emits the `vftintrzh.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrzh_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRZH_L_S, vd, vj));
+}
+
+/* Emits the `vftintrnel.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrnel_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNEL_L_S, vd, vj));
+}
+
+/* Emits the `vftintrneh.l.s vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vftintrneh_l_s(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VFTINTRNEH_L_S, vd, vj));
+}
+
+/* Emits the `vexth.h.b vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_h_b(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_H_B, vd, vj));
+}
+
+/* Emits the `vexth.w.h vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_w_h(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_W_H, vd, vj));
+}
+
+/* Emits the `vexth.d.w vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_d_w(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_D_W, vd, vj));
+}
+
+/* Emits the `vexth.q.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_q_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_Q_D, vd, vj));
+}
+
+/* Emits the `vexth.hu.bu vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_HU_BU, vd, vj));
+}
+
+/* Emits the `vexth.wu.hu vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_WU_HU, vd, vj));
+}
+
+/* Emits the `vexth.du.wu vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_du_wu(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_DU_WU, vd, vj));
+}
+
+/* Emits the `vexth.qu.du vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vexth_qu_du(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VEXTH_QU_DU, vd, vj));
+}
+
+/* Emits the `vreplgr2vr.b vd, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplgr2vr_b(TCGContext *s, TCGReg vd, TCGReg j)
+{
+    tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_B, vd, j));
+}
+
+/* Emits the `vreplgr2vr.h vd, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplgr2vr_h(TCGContext *s, TCGReg vd, TCGReg j)
+{
+    tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_H, vd, j));
+}
+
+/* Emits the `vreplgr2vr.w vd, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplgr2vr_w(TCGContext *s, TCGReg vd, TCGReg j)
+{
+    tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_W, vd, j));
+}
+
+/* Emits the `vreplgr2vr.d vd, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplgr2vr_d(TCGContext *s, TCGReg vd, TCGReg j)
+{
+    tcg_out32(s, encode_vdj_insn(OPC_VREPLGR2VR_D, vd, j));
+}
+
+/* Emits the `vrotri.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotri_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_vdvjuk3_insn(OPC_VROTRI_B, vd, vj, uk3));
+}
+
+/* Emits the `vrotri.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotri_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VROTRI_H, vd, vj, uk4));
+}
+
+/* Emits the `vrotri.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotri_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VROTRI_W, vd, vj, uk5));
+}
+
+/* Emits the `vrotri.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vrotri_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VROTRI_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrlri.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlri_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRLRI_B, vd, vj, uk3));
+}
+
+/* Emits the `vsrlri.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlri_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLRI_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrlri.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlri_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLRI_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrlri.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlri_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLRI_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrari.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrari_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRARI_B, vd, vj, uk3));
+}
+
+/* Emits the `vsrari.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrari_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRARI_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrari.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrari_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRARI_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrari.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrari_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRARI_D, vd, vj, uk6));
+}
+
+/* Emits the `vinsgr2vr.b vd, j, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vinsgr2vr_b(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdjuk4_insn(OPC_VINSGR2VR_B, vd, j, uk4));
+}
+
+/* Emits the `vinsgr2vr.h vd, j, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vinsgr2vr_h(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk3)
+{
+    tcg_out32(s, encode_vdjuk3_insn(OPC_VINSGR2VR_H, vd, j, uk3));
+}
+
+/* Emits the `vinsgr2vr.w vd, j, uk2` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vinsgr2vr_w(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk2)
+{
+    tcg_out32(s, encode_vdjuk2_insn(OPC_VINSGR2VR_W, vd, j, uk2));
+}
+
+/* Emits the `vinsgr2vr.d vd, j, uk1` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vinsgr2vr_d(TCGContext *s, TCGReg vd, TCGReg j, uint32_t uk1)
+{
+    tcg_out32(s, encode_vdjuk1_insn(OPC_VINSGR2VR_D, vd, j, uk1));
+}
+
+/* Emits the `vpickve2gr.b d, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_b(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_dvjuk4_insn(OPC_VPICKVE2GR_B, d, vj, uk4));
+}
+
+/* Emits the `vpickve2gr.h d, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_h(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_dvjuk3_insn(OPC_VPICKVE2GR_H, d, vj, uk3));
+}
+
+/* Emits the `vpickve2gr.w d, vj, uk2` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_w(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk2)
+{
+    tcg_out32(s, encode_dvjuk2_insn(OPC_VPICKVE2GR_W, d, vj, uk2));
+}
+
+/* Emits the `vpickve2gr.d d, vj, uk1` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_d(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk1)
+{
+    tcg_out32(s, encode_dvjuk1_insn(OPC_VPICKVE2GR_D, d, vj, uk1));
+}
+
+/* Emits the `vpickve2gr.bu d, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_bu(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_dvjuk4_insn(OPC_VPICKVE2GR_BU, d, vj, uk4));
+}
+
+/* Emits the `vpickve2gr.hu d, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_hu(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_dvjuk3_insn(OPC_VPICKVE2GR_HU, d, vj, uk3));
+}
+
+/* Emits the `vpickve2gr.wu d, vj, uk2` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_wu(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk2)
+{
+    tcg_out32(s, encode_dvjuk2_insn(OPC_VPICKVE2GR_WU, d, vj, uk2));
+}
+
+/* Emits the `vpickve2gr.du d, vj, uk1` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpickve2gr_du(TCGContext *s, TCGReg d, TCGReg vj, uint32_t uk1)
+{
+    tcg_out32(s, encode_dvjuk1_insn(OPC_VPICKVE2GR_DU, d, vj, uk1));
+}
+
+/* Emits the `vreplvei.b vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplvei_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VREPLVEI_B, vd, vj, uk4));
+}
+
+/* Emits the `vreplvei.h vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplvei_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_vdvjuk3_insn(OPC_VREPLVEI_H, vd, vj, uk3));
+}
+
+/* Emits the `vreplvei.w vd, vj, uk2` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplvei_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk2)
+{
+    tcg_out32(s, encode_vdvjuk2_insn(OPC_VREPLVEI_W, vd, vj, uk2));
+}
+
+/* Emits the `vreplvei.d vd, vj, uk1` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vreplvei_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk1)
+{
+    tcg_out32(s, encode_vdvjuk1_insn(OPC_VREPLVEI_D, vd, vj, uk1));
+}
+
+/* Emits the `vsllwil.h.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsllwil_h_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_vdvjuk3_insn(OPC_VSLLWIL_H_B, vd, vj, uk3));
+}
+
+/* Emits the `vsllwil.w.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsllwil_w_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSLLWIL_W_H, vd, vj, uk4));
+}
+
+/* Emits the `vsllwil.d.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsllwil_d_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLLWIL_D_W, vd, vj, uk5));
+}
+
+/* Emits the `vextl.q.d vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vextl_q_d(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VEXTL_Q_D, vd, vj));
+}
+
+/* Emits the `vsllwil.hu.bu vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsllwil_hu_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_vdvjuk3_insn(OPC_VSLLWIL_HU_BU, vd, vj, uk3));
+}
+
+/* Emits the `vsllwil.wu.hu vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsllwil_wu_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSLLWIL_WU_HU, vd, vj, uk4));
+}
+
+/* Emits the `vsllwil.du.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsllwil_du_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLLWIL_DU_WU, vd, vj, uk5));
+}
+
+/* Emits the `vextl.qu.du vd, vj` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vextl_qu_du(TCGContext *s, TCGReg vd, TCGReg vj)
+{
+    tcg_out32(s, encode_vdvj_insn(OPC_VEXTL_QU_DU, vd, vj));
+}
+
+/* Emits the `vbitclri.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclri_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_vdvjuk3_insn(OPC_VBITCLRI_B, vd, vj, uk3));
+}
+
+/* Emits the `vbitclri.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclri_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VBITCLRI_H, vd, vj, uk4));
+}
+
+/* Emits the `vbitclri.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclri_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VBITCLRI_W, vd, vj, uk5));
+}
+
+/* Emits the `vbitclri.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitclri_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VBITCLRI_D, vd, vj, uk6));
+}
+
+/* Emits the `vbitseti.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitseti_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_vdvjuk3_insn(OPC_VBITSETI_B, vd, vj, uk3));
+}
+
+/* Emits the `vbitseti.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitseti_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VBITSETI_H, vd, vj, uk4));
+}
+
+/* Emits the `vbitseti.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitseti_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VBITSETI_W, vd, vj, uk5));
+}
+
+/* Emits the `vbitseti.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitseti_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VBITSETI_D, vd, vj, uk6));
+}
+
+/* Emits the `vbitrevi.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrevi_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_vdvjuk3_insn(OPC_VBITREVI_B, vd, vj, uk3));
+}
+
+/* Emits the `vbitrevi.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrevi_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VBITREVI_H, vd, vj, uk4));
+}
+
+/* Emits the `vbitrevi.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrevi_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VBITREVI_W, vd, vj, uk5));
+}
+
+/* Emits the `vbitrevi.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitrevi_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VBITREVI_D, vd, vj, uk6));
+}
+
+/* Emits the `vsat.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_vdvjuk3_insn(OPC_VSAT_B, vd, vj, uk3));
+}
+
+/* Emits the `vsat.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSAT_H, vd, vj, uk4));
+}
+
+/* Emits the `vsat.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSAT_W, vd, vj, uk5));
+}
+
+/* Emits the `vsat.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSAT_D, vd, vj, uk6));
+}
+
+/* Emits the `vsat.bu vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_bu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_vdvjuk3_insn(OPC_VSAT_BU, vd, vj, uk3));
+}
+
+/* Emits the `vsat.hu vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_hu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSAT_HU, vd, vj, uk4));
+}
+
+/* Emits the `vsat.wu vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_wu(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSAT_WU, vd, vj, uk5));
+}
+
+/* Emits the `vsat.du vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsat_du(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSAT_DU, vd, vj, uk6));
+}
+
+/* Emits the `vslli.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslli_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_vdvjuk3_insn(OPC_VSLLI_B, vd, vj, uk3));
+}
+
+/* Emits the `vslli.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslli_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSLLI_H, vd, vj, uk4));
+}
+
+/* Emits the `vslli.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslli_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSLLI_W, vd, vj, uk5));
+}
+
+/* Emits the `vslli.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vslli_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSLLI_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrli.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrli_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRLI_B, vd, vj, uk3));
+}
+
+/* Emits the `vsrli.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrli_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLI_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrli.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrli_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLI_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrli.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrli_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLI_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrai.b vd, vj, uk3` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrai_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk3)
+{
+    tcg_out32(s, encode_vdvjuk3_insn(OPC_VSRAI_B, vd, vj, uk3));
+}
+
+/* Emits the `vsrai.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrai_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRAI_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrai.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrai_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRAI_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrai.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrai_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRAI_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrlni.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLNI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrlni.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLNI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrlni.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLNI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrlni.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRLNI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vsrlrni.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRLRNI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrlrni.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRLRNI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrlrni.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRLRNI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrlrni.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrlrni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRLRNI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrlni.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLNI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrlni.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLNI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrlni.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLNI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrlni.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLNI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrlni.bu.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLNI_BU_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrlni.hu.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLNI_HU_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrlni.wu.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLNI_WU_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrlni.du.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlni_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLNI_DU_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrlrni.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLRNI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrlrni.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLRNI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrlrni.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLRNI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrlrni.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLRNI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrlrni.bu.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRLRNI_BU_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrlrni.hu.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRLRNI_HU_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrlrni.wu.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRLRNI_WU_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrlrni.du.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrlrni_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRLRNI_DU_Q, vd, vj, uk7));
+}
+
+/* Emits the `vsrani.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrani_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRANI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrani.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrani_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRANI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrani.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrani_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRANI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrani.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrani_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRANI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vsrarni.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSRARNI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vsrarni.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSRARNI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vsrarni.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSRARNI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vsrarni.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vsrarni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSRARNI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrani.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRANI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrani.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRANI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrani.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRANI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrani.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRANI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrani.bu.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRANI_BU_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrani.hu.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRANI_HU_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrani.wu.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRANI_WU_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrani.du.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrani_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRANI_DU_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrarni.b.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_b_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRARNI_B_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrarni.h.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_h_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRARNI_H_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrarni.w.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_w_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRARNI_W_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrarni.d.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_d_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRARNI_D_Q, vd, vj, uk7));
+}
+
+/* Emits the `vssrarni.bu.h vd, vj, uk4` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_bu_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk4)
+{
+    tcg_out32(s, encode_vdvjuk4_insn(OPC_VSSRARNI_BU_H, vd, vj, uk4));
+}
+
+/* Emits the `vssrarni.hu.w vd, vj, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_hu_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk5)
+{
+    tcg_out32(s, encode_vdvjuk5_insn(OPC_VSSRARNI_HU_W, vd, vj, uk5));
+}
+
+/* Emits the `vssrarni.wu.d vd, vj, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_wu_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk6)
+{
+    tcg_out32(s, encode_vdvjuk6_insn(OPC_VSSRARNI_WU_D, vd, vj, uk6));
+}
+
+/* Emits the `vssrarni.du.q vd, vj, uk7` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vssrarni_du_q(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk7)
+{
+    tcg_out32(s, encode_vdvjuk7_insn(OPC_VSSRARNI_DU_Q, vd, vj, uk7));
+}
+
+/* Emits the `vextrins.d vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vextrins_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_D, vd, vj, uk8));
+}
+
+/* Emits the `vextrins.w vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vextrins_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_W, vd, vj, uk8));
+}
+
+/* Emits the `vextrins.h vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vextrins_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_H, vd, vj, uk8));
+}
+
+/* Emits the `vextrins.b vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vextrins_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_out32(s, encode_vdvjuk8_insn(OPC_VEXTRINS_B, vd, vj, uk8));
+}
+
+/* Emits the `vshuf4i.b vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf4i_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_B, vd, vj, uk8));
+}
+
+/* Emits the `vshuf4i.h vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf4i_h(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_H, vd, vj, uk8));
+}
+
+/* Emits the `vshuf4i.w vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf4i_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_W, vd, vj, uk8));
+}
+
+/* Emits the `vshuf4i.d vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vshuf4i_d(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_out32(s, encode_vdvjuk8_insn(OPC_VSHUF4I_D, vd, vj, uk8));
+}
+
+/* Emits the `vbitseli.b vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vbitseli_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_out32(s, encode_vdvjuk8_insn(OPC_VBITSELI_B, vd, vj, uk8));
+}
+
+/* Emits the `vandi.b vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vandi_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_out32(s, encode_vdvjuk8_insn(OPC_VANDI_B, vd, vj, uk8));
+}
+
+/* Emits the `vori.b vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vori_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_out32(s, encode_vdvjuk8_insn(OPC_VORI_B, vd, vj, uk8));
+}
+
+/* Emits the `vxori.b vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vxori_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_out32(s, encode_vdvjuk8_insn(OPC_VXORI_B, vd, vj, uk8));
+}
+
+/* Emits the `vnori.b vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vnori_b(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_out32(s, encode_vdvjuk8_insn(OPC_VNORI_B, vd, vj, uk8));
+}
+
+/* Emits the `vldi vd, sj13` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vldi(TCGContext *s, TCGReg vd, int32_t sj13)
+{
+    tcg_out32(s, encode_vdsj13_insn(OPC_VLDI, vd, sj13));
+}
+
+/* Emits the `vpermi.w vd, vj, uk8` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_vpermi_w(TCGContext *s, TCGReg vd, TCGReg vj, uint32_t uk8)
+{
+    tcg_out32(s, encode_vdvjuk8_insn(OPC_VPERMI_W, vd, vj, uk8));
+}
+
/* End of generated code. */
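
All of the emitters above share one shape: pack the operand fields into a 32-bit instruction word with one of the generated encode_* helpers, then append that word to the code buffer with tcg_out32(). The helpers themselves live earlier in the generated file and are not part of this hunk; as a hedged sketch only, the two-register vd/vj form could look like this, assuming the usual LSX field layout with vd in bits [4:0] and vj in bits [9:5]:

    static int32_t encode_vdvj_insn(LoongArchInsn opc, TCGReg vd, TCGReg vj)
    {
        /* Sketch, not the generated helper: mask each TCGReg down to its
           5-bit hardware register field (TCG numbers V0-V31 from 32 up). */
        return opc | ((vj & 0x1f) << 5) | (vd & 0x1f);
    }

The immediate forms (uk1 through uk8, sj13) differ only in where the extra field is shifted into the word.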
--
2.25.1

--
2.34.1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

Introduce a structure to hold handlers specific to sysemu.

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210517105140.1062037-15-f4bug@amsat.org>
[rth: Squash "restrict hw/core/sysemu-cpu-ops.h" patch]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h            |  6 ++++++
 include/hw/core/sysemu-cpu-ops.h | 21 +++++++++++++++++++++
 cpu.c                            |  1 +
 target/alpha/cpu.c               |  8 ++++++++
 target/arm/cpu.c                 |  8 ++++++++
 target/avr/cpu.c                 |  6 ++++++
 target/cris/cpu.c                |  8 ++++++++
 target/hppa/cpu.c                |  8 ++++++++
 target/i386/cpu.c                |  8 ++++++++
 target/m68k/cpu.c                |  8 ++++++++
 target/microblaze/cpu.c          |  8 ++++++++
 target/mips/cpu.c                |  8 ++++++++
 target/nios2/cpu.c               |  8 ++++++++
 target/openrisc/cpu.c            |  8 ++++++++
 target/ppc/cpu_init.c            |  8 ++++++++
 target/riscv/cpu.c               |  8 ++++++++
 target/rx/cpu.c                  | 10 ++++++++++
 target/s390x/cpu.c               |  8 ++++++++
 target/sh4/cpu.c                 |  6 ++++++
 target/sparc/cpu.c               |  8 ++++++++
 target/tricore/cpu.c             |  6 ++++++
 target/xtensa/cpu.c              |  6 ++++++
 22 files changed, 174 insertions(+)
 create mode 100644 include/hw/core/sysemu-cpu-ops.h

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ struct TCGCPUOps;
 /* see accel-cpu.h */
 struct AccelCPUClass;
 
+/* see sysemu-cpu-ops.h */
+struct SysemuCPUOps;
+
 /**
  * CPUClass:
  * @class_by_name: Callback to map -cpu command line model name to an
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
     bool gdb_stop_before_watchpoint;
     struct AccelCPUClass *accel_cpu;
 
+    /* when system emulation is not available, this pointer is NULL */
+    const struct SysemuCPUOps *sysemu_ops;
+
     /* when TCG is not available, this pointer is NULL */
     struct TCGCPUOps *tcg_ops;
 
diff --git a/include/hw/core/sysemu-cpu-ops.h b/include/hw/core/sysemu-cpu-ops.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/include/hw/core/sysemu-cpu-ops.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * CPU operations specific to system emulation
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef SYSEMU_CPU_OPS_H
+#define SYSEMU_CPU_OPS_H
+
+#include "hw/core/cpu.h"
+
+/*
+ * struct SysemuCPUOps: System operations specific to a CPU class
+ */
+typedef struct SysemuCPUOps {
+} SysemuCPUOps;
+
+#endif /* SYSEMU_CPU_OPS_H */
diff --git a/cpu.c b/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/cpu.c
+++ b/cpu.c
@@ -XXX,XX +XXX,XX @@
 #ifdef CONFIG_USER_ONLY
 #include "qemu.h"
 #else
+#include "hw/core/sysemu-cpu-ops.h"
 #include "exec/address-spaces.h"
 #endif
 #include "sysemu/tcg.h"
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_initfn(Object *obj)
 #endif
 }
 
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps alpha_sysemu_ops = {
+};
+#endif
+
 #include "hw/core/tcg-cpu-ops.h"
 
 static struct TCGCPUOps alpha_tcg_ops = {
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
 #ifndef CONFIG_USER_ONLY
     cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug;
     dc->vmsd = &vmstate_alpha_cpu;
+    cc->sysemu_ops = &alpha_sysemu_ops;
 #endif
     cc->disas_set_info = alpha_cpu_disas_set_info;
 
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static gchar *arm_gdb_arch_name(CPUState *cs)
     return g_strdup("arm");
 }
 
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps arm_sysemu_ops = {
+};
+#endif
+
 #ifdef CONFIG_TCG
 static struct TCGCPUOps arm_tcg_ops = {
     .initialize = arm_translate_init,
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
     cc->virtio_is_big_endian = arm_cpu_virtio_is_big_endian;
     cc->write_elf64_note = arm_cpu_write_elf64_note;
     cc->write_elf32_note = arm_cpu_write_elf32_note;
+    cc->sysemu_ops = &arm_sysemu_ops;
 #endif
     cc->gdb_num_core_regs = 26;
     cc->gdb_core_xml_file = "arm-core.xml";
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     qemu_fprintf(f, "\n");
 }
 
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps avr_sysemu_ops = {
+};
+
 #include "hw/core/tcg-cpu-ops.h"
 
 static struct TCGCPUOps avr_tcg_ops = {
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
     cc->memory_rw_debug = avr_cpu_memory_rw_debug;
     cc->get_phys_page_debug = avr_cpu_get_phys_page_debug;
     dc->vmsd = &vms_avr_cpu;
+    cc->sysemu_ops = &avr_sysemu_ops;
     cc->disas_set_info = avr_cpu_disas_set_info;
     cc->gdb_read_register = avr_cpu_gdb_read_register;
     cc->gdb_write_register = avr_cpu_gdb_write_register;
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/cpu.c
+++ b/target/cris/cpu.c
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_initfn(Object *obj)
 #endif
 }
 
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps cris_sysemu_ops = {
+};
+#endif
+
 #include "hw/core/tcg-cpu-ops.h"
 
 static struct TCGCPUOps crisv10_tcg_ops = {
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
 #ifndef CONFIG_USER_ONLY
     cc->get_phys_page_debug = cris_cpu_get_phys_page_debug;
     dc->vmsd = &vmstate_cris_cpu;
+    cc->sysemu_ops = &cris_sysemu_ops;
 #endif
 
     cc->gdb_num_core_regs = 49;
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -XXX,XX +XXX,XX @@ static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
     return object_class_by_name(TYPE_HPPA_CPU);
 }
 
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps hppa_sysemu_ops = {
+};
+#endif
+
 #include "hw/core/tcg-cpu-ops.h"
 
 static struct TCGCPUOps hppa_tcg_ops = {
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
 #ifndef CONFIG_USER_ONLY
     cc->get_phys_page_debug = hppa_cpu_get_phys_page_debug;
     dc->vmsd = &vmstate_hppa_cpu;
+    cc->sysemu_ops = &hppa_sysemu_ops;
 #endif
     cc->disas_set_info = hppa_cpu_disas_set_info;
     cc->gdb_num_core_regs = 128;
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property x86_cpu_properties[] = {
     DEFINE_PROP_END_OF_LIST()
 };
 
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps i386_sysemu_ops = {
+};
+#endif
+
 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
 {
     X86CPUClass *xcc = X86_CPU_CLASS(oc);
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
     cc->write_elf32_note = x86_cpu_write_elf32_note;
     cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
     cc->legacy_vmsd = &vmstate_x86_cpu;
+    cc->sysemu_ops = &i386_sysemu_ops;
 #endif /* !CONFIG_USER_ONLY */
 
     cc->gdb_arch_name = x86_gdb_arch_name;
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_m68k_cpu = {
 };
 #endif
 
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps m68k_sysemu_ops = {
+};
+#endif
+
 #include "hw/core/tcg-cpu-ops.h"
 
 static struct TCGCPUOps m68k_tcg_ops = {
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
 #if defined(CONFIG_SOFTMMU)
     cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
     dc->vmsd = &vmstate_m68k_cpu;
+    cc->sysemu_ops = &m68k_sysemu_ops;
 #endif
     cc->disas_set_info = m68k_cpu_disas_set_info;
 
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static ObjectClass *mb_cpu_class_by_name(const char *cpu_model)
     return object_class_by_name(TYPE_MICROBLAZE_CPU);
 }
 
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps mb_sysemu_ops = {
+};
+#endif
+
 #include "hw/core/tcg-cpu-ops.h"
 
 static struct TCGCPUOps mb_tcg_ops = {
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
 #ifndef CONFIG_USER_ONLY
     cc->get_phys_page_attrs_debug = mb_cpu_get_phys_page_attrs_debug;
     dc->vmsd = &vmstate_mb_cpu;
+    cc->sysemu_ops = &mb_sysemu_ops;
 #endif
     device_class_set_props(dc, mb_properties);
     cc->gdb_num_core_regs = 32 + 27;
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property mips_cpu_properties[] = {
    DEFINE_PROP_END_OF_LIST()
 };
 
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps mips_sysemu_ops = {
+};
+#endif
+
 #ifdef CONFIG_TCG
 #include "hw/core/tcg-cpu-ops.h"
 /*
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
 #ifndef CONFIG_USER_ONLY
     cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
     cc->legacy_vmsd = &vmstate_mips_cpu;
+    cc->sysemu_ops = &mips_sysemu_ops;
 #endif
     cc->disas_set_info = mips_cpu_disas_set_info;
     cc->gdb_num_core_regs = 73;
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property nios2_properties[] = {
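
SysemuCPUOps is deliberately introduced empty: this patch only creates the container and wires a cc->sysemu_ops pointer into every target, so that follow-up patches can migrate handlers one at a time without touching all targets at once. As a hedged illustration of the intended end state (the member names below are assumptions based on the handlers the series later moves, not part of this diff), a populated instance might read:

    static const struct SysemuCPUOps alpha_sysemu_ops = {
        /* Illustrative only: these fields are added by later patches. */
        .get_phys_page_debug = alpha_cpu_get_phys_page_debug,
        .legacy_vmsd         = &vmstate_alpha_cpu,
    };

User-only builds can then leave cc->sysemu_ops NULL and never reference the system-emulation handlers at all.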
From: Jiajie Chen <c@jia.je>

LSX support on the host CPU is detected via hwcap.

Lower the following ops to LSX:

- dup_vec
- dupi_vec
- dupm_vec
- ld_vec
- st_vec

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-3-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target-con-set.h |   2 +
 tcg/loongarch64/tcg-target-con-str.h |   1 +
 tcg/loongarch64/tcg-target.h         |  38 ++++-
 tcg/loongarch64/tcg-target.opc.h     |  12 ++
 tcg/loongarch64/tcg-target.c.inc     | 219 ++++++++++++++++++++++++++-
 5 files changed, 270 insertions(+), 2 deletions(-)
 create mode 100644 tcg/loongarch64/tcg-target.opc.h

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@
 C_O0_I1(r)
 C_O0_I2(rZ, r)
 C_O0_I2(rZ, rZ)
+C_O0_I2(w, r)
 C_O1_I1(r, r)
+C_O1_I1(w, r)
 C_O1_I2(r, r, rC)
 C_O1_I2(r, r, ri)
 C_O1_I2(r, r, rI)
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-str.h
+++ b/tcg/loongarch64/tcg-target-con-str.h
@@ -XXX,XX +XXX,XX @@
  * REGS(letter, register_mask)
  */
 REGS('r', ALL_GENERAL_REGS)
+REGS('w', ALL_VECTOR_REGS)
 
 /*
  * Define constraint letters for constants:
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@
 #define LOONGARCH_TCG_TARGET_H
 
 #define TCG_TARGET_INSN_UNIT_SIZE 4
-#define TCG_TARGET_NB_REGS 32
+#define TCG_TARGET_NB_REGS 64
 
 #define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
 
@@ -XXX,XX +XXX,XX @@ typedef enum {
     TCG_REG_S7,
     TCG_REG_S8,
 
+    TCG_REG_V0 = 32, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
+    TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
+    TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
+    TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
+    TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
+    TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
+    TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27,
+    TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
+
     /* aliases */
     TCG_AREG0 = TCG_REG_S0,
     TCG_REG_TMP0 = TCG_REG_T8,
     TCG_REG_TMP1 = TCG_REG_T7,
     TCG_REG_TMP2 = TCG_REG_T6,
+    TCG_VEC_TMP0 = TCG_REG_V23,
 } TCGReg;
 
+extern bool use_lsx_instructions;
+
 /* used for function call generation */
 #define TCG_REG_CALL_STACK TCG_REG_SP
 #define TCG_TARGET_STACK_ALIGN 16
@@ -XXX,XX +XXX,XX @@ typedef enum {
 
 #define TCG_TARGET_HAS_qemu_ldst_i128 0
 
+#define TCG_TARGET_HAS_v64 0
+#define TCG_TARGET_HAS_v128 use_lsx_instructions
+#define TCG_TARGET_HAS_v256 0
+
+#define TCG_TARGET_HAS_not_vec 0
+#define TCG_TARGET_HAS_neg_vec 0
+#define TCG_TARGET_HAS_abs_vec 0
+#define TCG_TARGET_HAS_andc_vec 0
+#define TCG_TARGET_HAS_orc_vec 0
+#define TCG_TARGET_HAS_nand_vec 0
+#define TCG_TARGET_HAS_nor_vec 0
+#define TCG_TARGET_HAS_eqv_vec 0
+#define TCG_TARGET_HAS_mul_vec 0
+#define TCG_TARGET_HAS_shi_vec 0
+#define TCG_TARGET_HAS_shs_vec 0
+#define TCG_TARGET_HAS_shv_vec 0
+#define TCG_TARGET_HAS_roti_vec 0
+#define TCG_TARGET_HAS_rots_vec 0
+#define TCG_TARGET_HAS_rotv_vec 0
+#define TCG_TARGET_HAS_sat_vec 0
+#define TCG_TARGET_HAS_minmax_vec 0
+#define TCG_TARGET_HAS_bitsel_vec 0
+#define TCG_TARGET_HAS_cmpsel_vec 0
+
 #define TCG_TARGET_DEFAULT_MO (0)
 
 #define TCG_TARGET_NEED_LDST_LABELS
diff --git a/tcg/loongarch64/tcg-target.opc.h b/tcg/loongarch64/tcg-target.opc.h
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tcg/loongarch64/tcg-target.opc.h
@@ -XXX,XX +XXX,XX @@
+/*
+ * Copyright (c) 2023 Jiajie Chen
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version.
+ *
+ * See the COPYING file in the top-level directory for details.
+ *
+ * Target-specific opcodes for host vector expansion. These will be
+ * emitted by tcg_expand_vec_op. For those familiar with GCC internals,
+ * consider these to be UNSPEC with names.
+ */
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@
 #include "../tcg-ldst.c.inc"
 #include <asm/hwcap.h>
 
+bool use_lsx_instructions;
+
 #ifdef CONFIG_DEBUG_TCG
 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
     "zero",
@@ -XXX,XX +XXX,XX @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
     "s5",
     "s6",
     "s7",
-    "s8"
+    "s8",
+    "vr0",
+    "vr1",
+    "vr2",
+    "vr3",
+    "vr4",
+    "vr5",
+    "vr6",
+    "vr7",
+    "vr8",
+    "vr9",
+    "vr10",
+    "vr11",
+    "vr12",
+    "vr13",
+    "vr14",
+    "vr15",
+    "vr16",
+    "vr17",
+    "vr18",
+    "vr19",
+    "vr20",
+    "vr21",
+    "vr22",
+    "vr23",
+    "vr24",
+    "vr25",
+    "vr26",
+    "vr27",
+    "vr28",
+    "vr29",
+    "vr30",
+    "vr31",
 };
 #endif
 
@@ -XXX,XX +XXX,XX @@ static const int tcg_target_reg_alloc_order[] = {
     TCG_REG_A2,
     TCG_REG_A1,
     TCG_REG_A0,
+
+    /* Vector registers */
+    TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
+    TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
+    TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
+    TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
+    TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
+    TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
+    /* V24 - V31 are caller-saved, and skipped. */
 };
 
 static const int tcg_target_call_iarg_regs[] = {
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 #define TCG_CT_CONST_WSZ 0x2000
 
 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
+#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
 
 static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
 {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     }
 }
 
+static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
+                            TCGReg rd, TCGReg rs)
+{
+    switch (vece) {
+    case MO_8:
+        tcg_out_opc_vreplgr2vr_b(s, rd, rs);
+        break;
+    case MO_16:
+        tcg_out_opc_vreplgr2vr_h(s, rd, rs);
+        break;
+    case MO_32:
+        tcg_out_opc_vreplgr2vr_w(s, rd, rs);
+        break;
+    case MO_64:
+        tcg_out_opc_vreplgr2vr_d(s, rd, rs);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    return true;
+}
+
+static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
+                             TCGReg r, TCGReg base, intptr_t offset)
+{
+    /* Handle imm overflow and division (vldrepl.d imm is divided by 8) */
+    if (offset < -0x800 || offset > 0x7ff || \
+        (offset & ((1 << vece) - 1)) != 0) {
+        tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset);
+        base = TCG_REG_TMP0;
+        offset = 0;
+    }
+    offset >>= vece;
+
+    switch (vece) {
+    case MO_8:
+        tcg_out_opc_vldrepl_b(s, r, base, offset);
+        break;
+    case MO_16:
+        tcg_out_opc_vldrepl_h(s, r, base, offset);
+        break;
+    case MO_32:
+        tcg_out_opc_vldrepl_w(s, r, base, offset);
+        break;
+    case MO_64:
+        tcg_out_opc_vldrepl_d(s, r, base, offset);
+        break;
+    default:
270
+ g_assert_not_reached();
336
DEFINE_PROP_END_OF_LIST(),
271
+ }
337
};
272
+ return true;
338
273
+}
339
+#ifndef CONFIG_USER_ONLY
274
+
340
+#include "hw/core/sysemu-cpu-ops.h"
275
+static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
341
+
276
+ TCGReg rd, int64_t v64)
342
+static const struct SysemuCPUOps nios2_sysemu_ops = {
277
+{
343
+};
278
+ /* Try vldi if imm can fit */
344
+#endif
279
+ int64_t value = sextract64(v64, 0, 8 << vece);
345
+
280
+ if (-0x200 <= value && value <= 0x1FF) {
346
#include "hw/core/tcg-cpu-ops.h"
281
+ uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF);
347
282
+ tcg_out_opc_vldi(s, rd, imm);
348
static struct TCGCPUOps nios2_tcg_ops = {
283
+ return;
349
@@ -XXX,XX +XXX,XX @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)
284
+ }
350
cc->disas_set_info = nios2_cpu_disas_set_info;
285
+
351
#ifndef CONFIG_USER_ONLY
286
+ /* TODO: vldi patterns when imm 12 is set */
352
cc->get_phys_page_debug = nios2_cpu_get_phys_page_debug;
287
+
353
+ cc->sysemu_ops = &nios2_sysemu_ops;
288
+ /* Fallback to vreplgr2vr */
354
#endif
289
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value);
355
cc->gdb_read_register = nios2_cpu_gdb_read_register;
290
+ switch (vece) {
356
cc->gdb_write_register = nios2_cpu_gdb_write_register;
291
+ case MO_8:
357
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
292
+ tcg_out_opc_vreplgr2vr_b(s, rd, TCG_REG_TMP0);
358
index XXXXXXX..XXXXXXX 100644
293
+ break;
359
--- a/target/openrisc/cpu.c
294
+ case MO_16:
360
+++ b/target/openrisc/cpu.c
295
+ tcg_out_opc_vreplgr2vr_h(s, rd, TCG_REG_TMP0);
361
@@ -XXX,XX +XXX,XX @@ static void openrisc_any_initfn(Object *obj)
296
+ break;
362
| (IMMUCFGR_NTS & (ctz32(TLB_SIZE) << 2));
297
+ case MO_32:
298
+ tcg_out_opc_vreplgr2vr_w(s, rd, TCG_REG_TMP0);
299
+ break;
300
+ case MO_64:
301
+ tcg_out_opc_vreplgr2vr_d(s, rd, TCG_REG_TMP0);
302
+ break;
303
+ default:
304
+ g_assert_not_reached();
305
+ }
306
+}
307
+
308
+static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
309
+ unsigned vecl, unsigned vece,
310
+ const TCGArg args[TCG_MAX_OP_ARGS],
311
+ const int const_args[TCG_MAX_OP_ARGS])
312
+{
313
+ TCGType type = vecl + TCG_TYPE_V64;
314
+ TCGArg a0, a1, a2;
315
+ TCGReg temp = TCG_REG_TMP0;
316
+
317
+ a0 = args[0];
318
+ a1 = args[1];
319
+ a2 = args[2];
320
+
321
+ /* Currently only supports V128 */
322
+ tcg_debug_assert(type == TCG_TYPE_V128);
323
+
324
+ switch (opc) {
325
+ case INDEX_op_st_vec:
326
+ /* Try to fit vst imm */
327
+ if (-0x800 <= a2 && a2 <= 0x7ff) {
328
+ tcg_out_opc_vst(s, a0, a1, a2);
329
+ } else {
330
+ tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
331
+ tcg_out_opc_vstx(s, a0, a1, temp);
332
+ }
333
+ break;
334
+ case INDEX_op_ld_vec:
335
+ /* Try to fit vld imm */
336
+ if (-0x800 <= a2 && a2 <= 0x7ff) {
337
+ tcg_out_opc_vld(s, a0, a1, a2);
338
+ } else {
339
+ tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
340
+ tcg_out_opc_vldx(s, a0, a1, temp);
341
+ }
342
+ break;
343
+ case INDEX_op_dupm_vec:
344
+ tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
345
+ break;
346
+ default:
347
+ g_assert_not_reached();
348
+ }
349
+}
350
+
351
+int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
352
+{
353
+ switch (opc) {
354
+ case INDEX_op_ld_vec:
355
+ case INDEX_op_st_vec:
356
+ case INDEX_op_dup_vec:
357
+ case INDEX_op_dupm_vec:
358
+ return 1;
359
+ default:
360
+ return 0;
361
+ }
362
+}
363
+
364
+void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
365
+ TCGArg a0, ...)
366
+{
367
+ g_assert_not_reached();
368
+}
369
+
370
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
371
{
372
switch (op) {
373
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
374
case INDEX_op_movcond_i64:
375
return C_O1_I4(r, rZ, rJ, rZ, rZ);
376
377
+ case INDEX_op_ld_vec:
378
+ case INDEX_op_dupm_vec:
379
+ case INDEX_op_dup_vec:
380
+ return C_O1_I1(w, r);
381
+
382
+ case INDEX_op_st_vec:
383
+ return C_O0_I2(w, r);
384
+
385
default:
386
g_assert_not_reached();
387
}
388
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
389
exit(EXIT_FAILURE);
390
}
391
392
+ if (hwcap & HWCAP_LOONGARCH_LSX) {
393
+ use_lsx_instructions = 1;
394
+ }
395
+
396
tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
397
tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
398
399
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
400
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
401
tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
402
403
+ if (use_lsx_instructions) {
404
+ tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
405
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
406
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
407
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
408
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
409
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
410
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
411
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
412
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
413
+ }
414
+
415
s->reserved_regs = 0;
416
tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
417
tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
418
@@ -XXX,XX +XXX,XX @@ static void tcg_target_init(TCGContext *s)
419
tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
420
tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
421
tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
422
+ tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
363
}
423
}
364
424
365
+#ifndef CONFIG_USER_ONLY
425
typedef struct {
366
+#include "hw/core/sysemu-cpu-ops.h"
367
+
368
+static const struct SysemuCPUOps openrisc_sysemu_ops = {
369
+};
370
+#endif
371
+
372
#include "hw/core/tcg-cpu-ops.h"
373
374
static struct TCGCPUOps openrisc_tcg_ops = {
375
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
376
#ifndef CONFIG_USER_ONLY
377
cc->get_phys_page_debug = openrisc_cpu_get_phys_page_debug;
378
dc->vmsd = &vmstate_openrisc_cpu;
379
+ cc->sysemu_ops = &openrisc_sysemu_ops;
380
#endif
381
cc->gdb_num_core_regs = 32 + 3;
382
cc->disas_set_info = openrisc_disas_set_info;
383
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
384
index XXXXXXX..XXXXXXX 100644
385
--- a/target/ppc/cpu_init.c
386
+++ b/target/ppc/cpu_init.c
387
@@ -XXX,XX +XXX,XX @@ static Property ppc_cpu_properties[] = {
388
DEFINE_PROP_END_OF_LIST(),
389
};
390
391
+#ifndef CONFIG_USER_ONLY
392
+#include "hw/core/sysemu-cpu-ops.h"
393
+
394
+static const struct SysemuCPUOps ppc_sysemu_ops = {
395
+};
396
+#endif
397
+
398
#ifdef CONFIG_TCG
399
#include "hw/core/tcg-cpu-ops.h"
400
401
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
402
#ifndef CONFIG_USER_ONLY
403
cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug;
404
cc->legacy_vmsd = &vmstate_ppc_cpu;
405
+ cc->sysemu_ops = &ppc_sysemu_ops;
406
#endif
407
#if defined(CONFIG_SOFTMMU)
408
cc->write_elf64_note = ppc64_cpu_write_elf64_note;
409
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
410
index XXXXXXX..XXXXXXX 100644
411
--- a/target/riscv/cpu.c
412
+++ b/target/riscv/cpu.c
413
@@ -XXX,XX +XXX,XX @@ static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
414
return NULL;
415
}
416
417
+#ifndef CONFIG_USER_ONLY
418
+#include "hw/core/sysemu-cpu-ops.h"
419
+
420
+static const struct SysemuCPUOps riscv_sysemu_ops = {
421
+};
422
+#endif
423
+
424
#include "hw/core/tcg-cpu-ops.h"
425
426
static struct TCGCPUOps riscv_tcg_ops = {
427
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
428
#ifndef CONFIG_USER_ONLY
429
cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
430
cc->legacy_vmsd = &vmstate_riscv_cpu;
431
+ cc->sysemu_ops = &riscv_sysemu_ops;
432
cc->write_elf64_note = riscv_cpu_write_elf64_note;
433
cc->write_elf32_note = riscv_cpu_write_elf32_note;
434
#endif
435
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
436
index XXXXXXX..XXXXXXX 100644
437
--- a/target/rx/cpu.c
438
+++ b/target/rx/cpu.c
439
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_init(Object *obj)
440
qdev_init_gpio_in(DEVICE(cpu), rx_cpu_set_irq, 2);
441
}
442
443
+#ifndef CONFIG_USER_ONLY
444
+#include "hw/core/sysemu-cpu-ops.h"
445
+
446
+static const struct SysemuCPUOps rx_sysemu_ops = {
447
+};
448
+#endif
449
+
450
#include "hw/core/tcg-cpu-ops.h"
451
452
static struct TCGCPUOps rx_tcg_ops = {
453
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)
454
cc->dump_state = rx_cpu_dump_state;
455
cc->set_pc = rx_cpu_set_pc;
456
457
+#ifndef CONFIG_USER_ONLY
458
+ cc->sysemu_ops = &rx_sysemu_ops;
459
+#endif
460
cc->gdb_read_register = rx_cpu_gdb_read_register;
461
cc->gdb_write_register = rx_cpu_gdb_write_register;
462
cc->get_phys_page_debug = rx_cpu_get_phys_page_debug;
463
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
464
index XXXXXXX..XXXXXXX 100644
465
--- a/target/s390x/cpu.c
466
+++ b/target/s390x/cpu.c
467
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_reset_full(DeviceState *dev)
468
return s390_cpu_reset(s, S390_CPU_RESET_CLEAR);
469
}
470
471
+#ifndef CONFIG_USER_ONLY
472
+#include "hw/core/sysemu-cpu-ops.h"
473
+
474
+static const struct SysemuCPUOps s390_sysemu_ops = {
475
+};
476
+#endif
477
+
478
#ifdef CONFIG_TCG
479
#include "hw/core/tcg-cpu-ops.h"
480
481
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
482
cc->legacy_vmsd = &vmstate_s390_cpu;
483
cc->get_crash_info = s390_cpu_get_crash_info;
484
cc->write_elf64_note = s390_cpu_write_elf64_note;
485
+ cc->sysemu_ops = &s390_sysemu_ops;
486
#endif
487
cc->disas_set_info = s390_cpu_disas_set_info;
488
cc->gdb_num_core_regs = S390_NUM_CORE_REGS;
489
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
490
index XXXXXXX..XXXXXXX 100644
491
--- a/target/sh4/cpu.c
492
+++ b/target/sh4/cpu.c
493
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_sh_cpu = {
494
.name = "cpu",
495
.unmigratable = 1,
496
};
497
+
498
+#include "hw/core/sysemu-cpu-ops.h"
499
+
500
+static const struct SysemuCPUOps sh4_sysemu_ops = {
501
+};
502
#endif
503
504
#include "hw/core/tcg-cpu-ops.h"
505
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
506
cc->gdb_write_register = superh_cpu_gdb_write_register;
507
#ifndef CONFIG_USER_ONLY
508
cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
509
+ cc->sysemu_ops = &sh4_sysemu_ops;
510
dc->vmsd = &vmstate_sh_cpu;
511
#endif
512
cc->disas_set_info = superh_cpu_disas_set_info;
513
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
514
index XXXXXXX..XXXXXXX 100644
515
--- a/target/sparc/cpu.c
516
+++ b/target/sparc/cpu.c
517
@@ -XXX,XX +XXX,XX @@ static Property sparc_cpu_properties[] = {
518
DEFINE_PROP_END_OF_LIST()
519
};
520
521
+#ifndef CONFIG_USER_ONLY
522
+#include "hw/core/sysemu-cpu-ops.h"
523
+
524
+static const struct SysemuCPUOps sparc_sysemu_ops = {
525
+};
526
+#endif
527
+
528
#ifdef CONFIG_TCG
529
#include "hw/core/tcg-cpu-ops.h"
530
531
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
532
#ifndef CONFIG_USER_ONLY
533
cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
534
cc->legacy_vmsd = &vmstate_sparc_cpu;
535
+ cc->sysemu_ops = &sparc_sysemu_ops;
536
#endif
537
cc->disas_set_info = cpu_sparc_disas_set_info;
538
539
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
540
index XXXXXXX..XXXXXXX 100644
541
--- a/target/tricore/cpu.c
542
+++ b/target/tricore/cpu.c
543
@@ -XXX,XX +XXX,XX @@ static void tc27x_initfn(Object *obj)
544
set_feature(&cpu->env, TRICORE_FEATURE_161);
545
}
546
547
+#include "hw/core/sysemu-cpu-ops.h"
548
+
549
+static const struct SysemuCPUOps tricore_sysemu_ops = {
550
+};
551
+
552
#include "hw/core/tcg-cpu-ops.h"
553
554
static struct TCGCPUOps tricore_tcg_ops = {
555
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)
556
cc->dump_state = tricore_cpu_dump_state;
557
cc->set_pc = tricore_cpu_set_pc;
558
cc->get_phys_page_debug = tricore_cpu_get_phys_page_debug;
559
+ cc->sysemu_ops = &tricore_sysemu_ops;
560
cc->tcg_ops = &tricore_tcg_ops;
561
}
562
563
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
564
index XXXXXXX..XXXXXXX 100644
565
--- a/target/xtensa/cpu.c
566
+++ b/target/xtensa/cpu.c
567
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_xtensa_cpu = {
568
.name = "cpu",
569
.unmigratable = 1,
570
};
571
+
572
+#include "hw/core/sysemu-cpu-ops.h"
573
+
574
+static const struct SysemuCPUOps xtensa_sysemu_ops = {
575
+};
576
#endif
577
578
#include "hw/core/tcg-cpu-ops.h"
579
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
580
cc->gdb_write_register = xtensa_cpu_gdb_write_register;
581
cc->gdb_stop_before_watchpoint = true;
582
#ifndef CONFIG_USER_ONLY
583
+ cc->sysemu_ops = &xtensa_sysemu_ops;
584
cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
585
dc->vmsd = &vmstate_xtensa_cpu;
586
#endif
587
--
426
--
588
2.25.1
427
2.34.1
589
590
1
From: Philippe Mathieu-Daudé <philmd@redhat.com>
1
From: Jiajie Chen <c@jia.je>
2
2
3
Use uint8_t for (unsigned) byte, and uint16_t for (unsigned)
3
Pass vece to tcg_target_const_match() to allow correct interpretation of
4
16-bit word.
4
const args of vector ops.
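
As an illustration, the extra parameter lets a backend sign-extend a
constant at the element width before range-checking it. A minimal
sketch (the TCG_CT_CONST_VCMP constraint and its range are the ones
introduced by the loongarch64 patches later in this series):

    static bool tcg_target_const_match(int64_t val, TCGType type,
                                       int ct, int vece)
    {
        if (ct & TCG_CT_CONST) {
            return true;
        }
        if (ct & TCG_CT_CONST_VCMP) {
            /* Interpret val at the element width, then range-check. */
            int64_t vec_val = sextract64(val, 0, 8 << vece);
            return -0x10 <= vec_val && vec_val <= 0x1f;
        }
        return false;
    }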
5
5
6
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
6
Signed-off-by: Jiajie Chen <c@jia.je>
7
Message-Id: <20210518183655.1711377-5-philmd@redhat.com>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Message-Id: <20230908022302.180442-4-c@jia.je>
8
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
9
---
11
---
10
include/exec/memory_ldst_phys.h.inc | 16 ++++++++--------
12
tcg/tcg.c | 4 ++--
11
1 file changed, 8 insertions(+), 8 deletions(-)
13
tcg/aarch64/tcg-target.c.inc | 2 +-
14
tcg/arm/tcg-target.c.inc | 2 +-
15
tcg/i386/tcg-target.c.inc | 2 +-
16
tcg/loongarch64/tcg-target.c.inc | 2 +-
17
tcg/mips/tcg-target.c.inc | 2 +-
18
tcg/ppc/tcg-target.c.inc | 2 +-
19
tcg/riscv/tcg-target.c.inc | 2 +-
20
tcg/s390x/tcg-target.c.inc | 2 +-
21
tcg/sparc64/tcg-target.c.inc | 2 +-
22
tcg/tci/tcg-target.c.inc | 2 +-
23
11 files changed, 12 insertions(+), 12 deletions(-)
12
24
13
diff --git a/include/exec/memory_ldst_phys.h.inc b/include/exec/memory_ldst_phys.h.inc
25
diff --git a/tcg/tcg.c b/tcg/tcg.c
14
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
15
--- a/include/exec/memory_ldst_phys.h.inc
27
--- a/tcg/tcg.c
16
+++ b/include/exec/memory_ldst_phys.h.inc
28
+++ b/tcg/tcg.c
17
@@ -XXX,XX +XXX,XX @@
29
@@ -XXX,XX +XXX,XX @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
30
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
31
const TCGHelperInfo *info);
32
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
33
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct);
34
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece);
35
#ifdef TCG_TARGET_NEED_LDST_LABELS
36
static int tcg_out_ldst_finalize(TCGContext *s);
37
#endif
38
@@ -XXX,XX +XXX,XX @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
39
ts = arg_temp(arg);
40
41
if (ts->val_type == TEMP_VAL_CONST
42
- && tcg_target_const_match(ts->val, ts->type, arg_ct->ct)) {
43
+ && tcg_target_const_match(ts->val, ts->type, arg_ct->ct, TCGOP_VECE(op))) {
44
/* constant is OK for instruction */
45
const_args[i] = 1;
46
new_args[i] = ts->val;
47
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
48
index XXXXXXX..XXXXXXX 100644
49
--- a/tcg/aarch64/tcg-target.c.inc
50
+++ b/tcg/aarch64/tcg-target.c.inc
51
@@ -XXX,XX +XXX,XX @@ static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
52
}
53
}
54
55
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
56
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
57
{
58
if (ct & TCG_CT_CONST) {
59
return 1;
60
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
61
index XXXXXXX..XXXXXXX 100644
62
--- a/tcg/arm/tcg-target.c.inc
63
+++ b/tcg/arm/tcg-target.c.inc
64
@@ -XXX,XX +XXX,XX @@ static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
65
* mov operand2: values represented with x << (2 * y), x < 0x100
66
* add, sub, eor...: ditto
18
*/
67
*/
19
68
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
20
#ifdef TARGET_ENDIANNESS
69
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
21
-static inline uint32_t glue(lduw_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
22
+static inline uint16_t glue(lduw_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
23
{
70
{
24
return glue(address_space_lduw, SUFFIX)(ARG1, addr,
71
if (ct & TCG_CT_CONST) {
25
MEMTXATTRS_UNSPECIFIED, NULL);
72
return 1;
26
@@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(ldq_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
73
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
27
MEMTXATTRS_UNSPECIFIED, NULL);
74
index XXXXXXX..XXXXXXX 100644
75
--- a/tcg/i386/tcg-target.c.inc
76
+++ b/tcg/i386/tcg-target.c.inc
77
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
28
}
78
}
29
79
30
-static inline void glue(stw_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
80
/* test if a constant matches the constraint */
31
+static inline void glue(stw_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint16_t val)
81
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
82
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
32
{
83
{
33
glue(address_space_stw, SUFFIX)(ARG1, addr, val,
84
if (ct & TCG_CT_CONST) {
34
MEMTXATTRS_UNSPECIFIED, NULL);
85
return 1;
35
@@ -XXX,XX +XXX,XX @@ static inline void glue(stq_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
86
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
36
MEMTXATTRS_UNSPECIFIED, NULL);
87
index XXXXXXX..XXXXXXX 100644
88
--- a/tcg/loongarch64/tcg-target.c.inc
89
+++ b/tcg/loongarch64/tcg-target.c.inc
90
@@ -XXX,XX +XXX,XX @@ static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
37
}
91
}
38
#else
92
39
-static inline uint32_t glue(ldub_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
93
/* test if a constant matches the constraint */
40
+static inline uint8_t glue(ldub_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
94
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
95
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
41
{
96
{
42
return glue(address_space_ldub, SUFFIX)(ARG1, addr,
97
if (ct & TCG_CT_CONST) {
43
MEMTXATTRS_UNSPECIFIED, NULL);
98
return true;
99
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
100
index XXXXXXX..XXXXXXX 100644
101
--- a/tcg/mips/tcg-target.c.inc
102
+++ b/tcg/mips/tcg-target.c.inc
103
@@ -XXX,XX +XXX,XX @@ static bool is_p2m1(tcg_target_long val)
44
}
104
}
45
105
46
-static inline uint32_t glue(lduw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
106
/* test if a constant matches the constraint */
47
+static inline uint16_t glue(lduw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
107
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
108
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
48
{
109
{
49
return glue(address_space_lduw_le, SUFFIX)(ARG1, addr,
110
if (ct & TCG_CT_CONST) {
50
MEMTXATTRS_UNSPECIFIED, NULL);
111
return 1;
112
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
113
index XXXXXXX..XXXXXXX 100644
114
--- a/tcg/ppc/tcg-target.c.inc
115
+++ b/tcg/ppc/tcg-target.c.inc
116
@@ -XXX,XX +XXX,XX @@ static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
51
}
117
}
52
118
53
-static inline uint32_t glue(lduw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
119
/* test if a constant matches the constraint */
54
+static inline uint16_t glue(lduw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
120
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
121
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
55
{
122
{
56
return glue(address_space_lduw_be, SUFFIX)(ARG1, addr,
123
if (ct & TCG_CT_CONST) {
57
MEMTXATTRS_UNSPECIFIED, NULL);
124
return 1;
58
@@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(ldq_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
125
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
59
MEMTXATTRS_UNSPECIFIED, NULL);
126
index XXXXXXX..XXXXXXX 100644
127
--- a/tcg/riscv/tcg-target.c.inc
128
+++ b/tcg/riscv/tcg-target.c.inc
129
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
130
#define sextreg sextract64
131
132
/* test if a constant matches the constraint */
133
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
134
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
135
{
136
if (ct & TCG_CT_CONST) {
137
return 1;
138
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
139
index XXXXXXX..XXXXXXX 100644
140
--- a/tcg/s390x/tcg-target.c.inc
141
+++ b/tcg/s390x/tcg-target.c.inc
142
@@ -XXX,XX +XXX,XX @@ static bool risbg_mask(uint64_t c)
60
}
143
}
61
144
62
-static inline void glue(stb_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
145
/* Test if a constant matches the constraint. */
63
+static inline void glue(stb_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint8_t val)
146
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
147
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
64
{
148
{
65
glue(address_space_stb, SUFFIX)(ARG1, addr, val,
149
if (ct & TCG_CT_CONST) {
66
MEMTXATTRS_UNSPECIFIED, NULL);
150
return 1;
151
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
152
index XXXXXXX..XXXXXXX 100644
153
--- a/tcg/sparc64/tcg-target.c.inc
154
+++ b/tcg/sparc64/tcg-target.c.inc
155
@@ -XXX,XX +XXX,XX @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type,
67
}
156
}
68
157
69
-static inline void glue(stw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
158
/* test if a constant matches the constraint */
70
+static inline void glue(stw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint16_t val)
159
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
160
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
71
{
161
{
72
glue(address_space_stw_le, SUFFIX)(ARG1, addr, val,
162
if (ct & TCG_CT_CONST) {
73
MEMTXATTRS_UNSPECIFIED, NULL);
163
return 1;
164
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
165
index XXXXXXX..XXXXXXX 100644
166
--- a/tcg/tci/tcg-target.c.inc
167
+++ b/tcg/tci/tcg-target.c.inc
168
@@ -XXX,XX +XXX,XX @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
74
}
169
}
75
170
76
-static inline void glue(stw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
171
/* Test if a constant matches the constraint. */
77
+static inline void glue(stw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint16_t val)
172
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
173
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
78
{
174
{
79
glue(address_space_stw_be, SUFFIX)(ARG1, addr, val,
175
return ct & TCG_CT_CONST;
80
MEMTXATTRS_UNSPECIFIED, NULL);
176
}
81
--
177
--
82
2.25.1
178
2.34.1
83
179
84
180
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
From: Jiajie Chen <c@jia.je>
2
2
3
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Signed-off-by: Jiajie Chen <c@jia.je>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-Id: <20210517105140.1062037-23-f4bug@amsat.org>
5
Message-Id: <20230908022302.180442-5-c@jia.je>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
7
---
8
include/hw/core/cpu.h | 2 --
8
tcg/loongarch64/tcg-target-con-set.h | 1 +
9
include/hw/core/sysemu-cpu-ops.h | 4 ++++
9
tcg/loongarch64/tcg-target-con-str.h | 1 +
10
hw/core/cpu-sysemu.c | 4 ++--
10
tcg/loongarch64/tcg-target.c.inc | 65 ++++++++++++++++++++++++++++
11
target/i386/cpu.c | 4 +++-
11
3 files changed, 67 insertions(+)
12
4 files changed, 9 insertions(+), 5 deletions(-)
13
12
14
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
13
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
15
index XXXXXXX..XXXXXXX 100644
14
index XXXXXXX..XXXXXXX 100644
16
--- a/include/hw/core/cpu.h
15
--- a/tcg/loongarch64/tcg-target-con-set.h
17
+++ b/include/hw/core/cpu.h
16
+++ b/tcg/loongarch64/tcg-target-con-set.h
18
@@ -XXX,XX +XXX,XX @@ struct SysemuCPUOps;
17
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, 0, rZ)
19
* @dump_state: Callback for dumping state.
18
C_O1_I2(r, rZ, ri)
20
* @dump_statistics: Callback for dumping statistics.
19
C_O1_I2(r, rZ, rJ)
21
* @get_arch_id: Callback for getting architecture-dependent CPU ID.
20
C_O1_I2(r, rZ, rZ)
22
- * @get_paging_enabled: Callback for inquiring whether paging is enabled.
21
+C_O1_I2(w, w, wM)
23
* @set_pc: Callback for setting the Program Counter register. This
22
C_O1_I4(r, rZ, rJ, rZ, rZ)
24
* should have the semantics used by the target architecture when
23
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
25
* setting the PC from a source such as an ELF file entry point;
26
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
27
void (*dump_state)(CPUState *cpu, FILE *, int flags);
28
void (*dump_statistics)(CPUState *cpu, int flags);
29
int64_t (*get_arch_id)(CPUState *cpu);
30
- bool (*get_paging_enabled)(const CPUState *cpu);
31
void (*set_pc)(CPUState *cpu, vaddr value);
32
int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
33
int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
34
diff --git a/include/hw/core/sysemu-cpu-ops.h b/include/hw/core/sysemu-cpu-ops.h
35
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
36
--- a/include/hw/core/sysemu-cpu-ops.h
25
--- a/tcg/loongarch64/tcg-target-con-str.h
37
+++ b/include/hw/core/sysemu-cpu-ops.h
26
+++ b/tcg/loongarch64/tcg-target-con-str.h
38
@@ -XXX,XX +XXX,XX @@ typedef struct SysemuCPUOps {
27
@@ -XXX,XX +XXX,XX @@ CONST('U', TCG_CT_CONST_U12)
39
*/
28
CONST('Z', TCG_CT_CONST_ZERO)
40
void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
29
CONST('C', TCG_CT_CONST_C12)
41
Error **errp);
30
CONST('W', TCG_CT_CONST_WSZ)
42
+ /**
31
+CONST('M', TCG_CT_CONST_VCMP)
43
+ * @get_paging_enabled: Callback for inquiring whether paging is enabled.
32
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
44
+ */
45
+ bool (*get_paging_enabled)(const CPUState *cpu);
46
/**
47
* @get_phys_page_debug: Callback for obtaining a physical address.
48
*/
49
diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c
50
index XXXXXXX..XXXXXXX 100644
33
index XXXXXXX..XXXXXXX 100644
51
--- a/hw/core/cpu-sysemu.c
34
--- a/tcg/loongarch64/tcg-target.c.inc
52
+++ b/hw/core/cpu-sysemu.c
35
+++ b/tcg/loongarch64/tcg-target.c.inc
53
@@ -XXX,XX +XXX,XX @@ bool cpu_paging_enabled(const CPUState *cpu)
36
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
54
{
37
#define TCG_CT_CONST_U12 0x800
55
CPUClass *cc = CPU_GET_CLASS(cpu);
38
#define TCG_CT_CONST_C12 0x1000
56
39
#define TCG_CT_CONST_WSZ 0x2000
57
- if (cc->get_paging_enabled) {
40
+#define TCG_CT_CONST_VCMP 0x4000
58
- return cc->get_paging_enabled(cpu);
41
59
+ if (cc->sysemu_ops->get_paging_enabled) {
42
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
60
+ return cc->sysemu_ops->get_paging_enabled(cpu);
43
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
44
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
45
if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
46
return true;
61
}
47
}
62
48
+ int64_t vec_val = sextract64(val, 0, 8 << vece);
49
+ if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
50
+ return true;
51
+ }
63
return false;
52
return false;
64
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
65
index XXXXXXX..XXXXXXX 100644
66
--- a/target/i386/cpu.c
67
+++ b/target/i386/cpu.c
68
@@ -XXX,XX +XXX,XX @@ static int64_t x86_cpu_get_arch_id(CPUState *cs)
69
return cpu->apic_id;
70
}
53
}
71
54
72
+#if !defined(CONFIG_USER_ONLY)
55
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
73
static bool x86_cpu_get_paging_enabled(const CPUState *cs)
56
TCGType type = vecl + TCG_TYPE_V64;
74
{
57
TCGArg a0, a1, a2;
75
X86CPU *cpu = X86_CPU(cs);
58
TCGReg temp = TCG_REG_TMP0;
76
59
+ TCGReg temp_vec = TCG_VEC_TMP0;
77
return cpu->env.cr[0] & CR0_PG_MASK;
60
+
78
}
61
+ static const LoongArchInsn cmp_vec_insn[16][4] = {
79
+#endif /* !CONFIG_USER_ONLY */
62
+ [TCG_COND_EQ] = {OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D},
80
63
+ [TCG_COND_LE] = {OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D},
81
static void x86_cpu_set_pc(CPUState *cs, vaddr value)
64
+ [TCG_COND_LEU] = {OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU},
82
{
65
+ [TCG_COND_LT] = {OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D},
83
@@ -XXX,XX +XXX,XX @@ static Property x86_cpu_properties[] = {
66
+ [TCG_COND_LTU] = {OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU},
84
67
+ };
85
static const struct SysemuCPUOps i386_sysemu_ops = {
68
+ static const LoongArchInsn cmp_vec_imm_insn[16][4] = {
86
.get_memory_mapping = x86_cpu_get_memory_mapping,
69
+ [TCG_COND_EQ] = {OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D},
87
+ .get_paging_enabled = x86_cpu_get_paging_enabled,
70
+ [TCG_COND_LE] = {OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D},
88
.get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug,
71
+ [TCG_COND_LEU] = {OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU},
89
.asidx_from_attrs = x86_asidx_from_attrs,
72
+ [TCG_COND_LT] = {OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D},
90
.get_crash_info = x86_cpu_get_crash_info,
73
+ [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU},
91
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
74
+ };
92
cc->gdb_read_register = x86_cpu_gdb_read_register;
75
+ LoongArchInsn insn;
93
cc->gdb_write_register = x86_cpu_gdb_write_register;
76
94
cc->get_arch_id = x86_cpu_get_arch_id;
77
a0 = args[0];
95
- cc->get_paging_enabled = x86_cpu_get_paging_enabled;
78
a1 = args[1];
96
79
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
97
#ifndef CONFIG_USER_ONLY
80
tcg_out_opc_vldx(s, a0, a1, temp);
98
cc->sysemu_ops = &i386_sysemu_ops;
81
}
82
break;
83
+ case INDEX_op_cmp_vec:
84
+ TCGCond cond = args[3];
85
+ if (const_args[2]) {
86
+ /*
87
+ * cmp_vec dest, src, value
88
+ * Try vseqi/vslei/vslti
89
+ */
90
+ int64_t value = sextract64(a2, 0, 8 << vece);
91
+ if ((cond == TCG_COND_EQ || cond == TCG_COND_LE || \
92
+ cond == TCG_COND_LT) && (-0x10 <= value && value <= 0x0f)) {
93
+ tcg_out32(s, encode_vdvjsk5_insn(cmp_vec_imm_insn[cond][vece], \
94
+ a0, a1, value));
95
+ break;
96
+ } else if ((cond == TCG_COND_LEU || cond == TCG_COND_LTU) &&
97
+ (0x00 <= value && value <= 0x1f)) {
98
+ tcg_out32(s, encode_vdvjuk5_insn(cmp_vec_imm_insn[cond][vece], \
99
+ a0, a1, value));
100
+ break;
101
+ }
102
+
103
+ /*
104
+ * Fallback to:
105
+ * dupi_vec temp, a2
106
+ * cmp_vec a0, a1, temp, cond
107
+ */
108
+ tcg_out_dupi_vec(s, type, vece, temp_vec, a2);
109
+ a2 = temp_vec;
110
+ }
111
+
112
+ insn = cmp_vec_insn[cond][vece];
113
+ if (insn == 0) {
114
+ TCGArg t;
115
+ t = a1, a1 = a2, a2 = t;
116
+ cond = tcg_swap_cond(cond);
117
+ insn = cmp_vec_insn[cond][vece];
118
+ tcg_debug_assert(insn != 0);
119
+ }
120
+ tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
121
+ break;
122
case INDEX_op_dupm_vec:
123
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
124
break;
125
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
126
case INDEX_op_st_vec:
127
case INDEX_op_dup_vec:
128
case INDEX_op_dupm_vec:
129
+ case INDEX_op_cmp_vec:
130
return 1;
131
default:
132
return 0;
133
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
134
case INDEX_op_st_vec:
135
return C_O0_I2(w, r);
136
137
+ case INDEX_op_cmp_vec:
138
+ return C_O1_I2(w, w, wM);
139
+
140
default:
141
g_assert_not_reached();
142
}
99
--
143
--
100
2.25.1
144
2.34.1
101
102
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
From: Jiajie Chen <c@jia.je>
2
2
3
No code uses CPUClass::get_memory_mapping() outside of hw/core/cpu.c:
3
Lower the following ops:
4
4
5
$ git grep -F -- '->get_memory_mapping'
5
- add_vec
6
hw/core/cpu.c:87: cc->get_memory_mapping(cpu, list, errp);
6
- sub_vec
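
A front end reaches this lowering through the generic gvec API; a
minimal sketch, with dofs/aofs/bofs standing in as placeholder
CPUState offsets:

    /* Emits INDEX_op_add_vec / INDEX_op_sub_vec with vece == MO_32
     * once the backend advertises vector support. */
    tcg_gen_gvec_add(MO_32, dofs, aofs, bofs, 16, 16);
    tcg_gen_gvec_sub(MO_32, dofs, aofs, bofs, 16, 16);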
7
hw/core/cpu.c:439: k->get_memory_mapping = cpu_common_get_memory_mapping;
8
target/i386/cpu.c:7422: cc->get_memory_mapping = x86_cpu_get_memory_mapping;
9
7
10
Check for the handler's presence at the call site and remove the common fallback code.
8
Signed-off-by: Jiajie Chen <c@jia.je>
11
12
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
13
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
14
Message-Id: <20210517105140.1062037-11-f4bug@amsat.org>
10
Message-Id: <20230908022302.180442-6-c@jia.je>
15
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
11
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
16
---
12
---
17
hw/core/cpu-common.c | 16 ----------------
13
tcg/loongarch64/tcg-target-con-set.h | 1 +
18
hw/core/cpu-sysemu.c | 13 +++++++++++++
14
tcg/loongarch64/tcg-target-con-str.h | 1 +
19
2 files changed, 13 insertions(+), 16 deletions(-)
15
tcg/loongarch64/tcg-target.c.inc | 61 ++++++++++++++++++++++++++++
16
3 files changed, 63 insertions(+)
20
17
21
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
18
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
22
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
23
--- a/hw/core/cpu-common.c
20
--- a/tcg/loongarch64/tcg-target-con-set.h
24
+++ b/hw/core/cpu-common.c
21
+++ b/tcg/loongarch64/tcg-target-con-set.h
25
@@ -XXX,XX +XXX,XX @@ CPUState *cpu_create(const char *typename)
22
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, rZ, ri)
26
return cpu;
23
C_O1_I2(r, rZ, rJ)
27
}
24
C_O1_I2(r, rZ, rZ)
28
25
C_O1_I2(w, w, wM)
29
-void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
26
+C_O1_I2(w, w, wA)
30
- Error **errp)
27
C_O1_I4(r, rZ, rJ, rZ, rZ)
31
-{
28
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
32
- CPUClass *cc = CPU_GET_CLASS(cpu);
33
-
34
- cc->get_memory_mapping(cpu, list, errp);
35
-}
36
-
37
-static void cpu_common_get_memory_mapping(CPUState *cpu,
38
- MemoryMappingList *list,
39
- Error **errp)
40
-{
41
- error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
42
-}
43
-
44
/* Resetting the IRQ comes from across the code base so we take the
45
* BQL here if we need to. cpu_interrupt assumes it is held.*/
46
void cpu_reset_interrupt(CPUState *cpu, int mask)
47
@@ -XXX,XX +XXX,XX @@ static void cpu_class_init(ObjectClass *klass, void *data)
48
k->parse_features = cpu_common_parse_features;
49
k->get_arch_id = cpu_common_get_arch_id;
50
k->has_work = cpu_common_has_work;
51
- k->get_memory_mapping = cpu_common_get_memory_mapping;
52
k->gdb_read_register = cpu_common_gdb_read_register;
53
k->gdb_write_register = cpu_common_gdb_write_register;
54
set_bit(DEVICE_CATEGORY_CPU, dc->categories);
55
diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c
56
index XXXXXXX..XXXXXXX 100644
29
index XXXXXXX..XXXXXXX 100644
57
--- a/hw/core/cpu-sysemu.c
30
--- a/tcg/loongarch64/tcg-target-con-str.h
58
+++ b/hw/core/cpu-sysemu.c
31
+++ b/tcg/loongarch64/tcg-target-con-str.h
59
@@ -XXX,XX +XXX,XX @@ bool cpu_paging_enabled(const CPUState *cpu)
32
@@ -XXX,XX +XXX,XX @@ CONST('Z', TCG_CT_CONST_ZERO)
33
CONST('C', TCG_CT_CONST_C12)
34
CONST('W', TCG_CT_CONST_WSZ)
35
CONST('M', TCG_CT_CONST_VCMP)
36
+CONST('A', TCG_CT_CONST_VADD)
37
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
38
index XXXXXXX..XXXXXXX 100644
39
--- a/tcg/loongarch64/tcg-target.c.inc
40
+++ b/tcg/loongarch64/tcg-target.c.inc
41
@@ -XXX,XX +XXX,XX @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
42
#define TCG_CT_CONST_C12 0x1000
43
#define TCG_CT_CONST_WSZ 0x2000
44
#define TCG_CT_CONST_VCMP 0x4000
45
+#define TCG_CT_CONST_VADD 0x8000
46
47
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
48
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
49
@@ -XXX,XX +XXX,XX @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
50
if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
51
return true;
52
}
53
+ if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
54
+ return true;
55
+ }
60
return false;
56
return false;
61
}
57
}
62
58
63
+void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
59
@@ -XXX,XX +XXX,XX @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
64
+ Error **errp)
60
}
61
}
62
63
+static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0,
64
+ const TCGArg a1, const TCGArg a2,
65
+ bool a2_is_const, bool is_add)
65
+{
66
+{
66
+ CPUClass *cc = CPU_GET_CLASS(cpu);
67
+ static const LoongArchInsn add_vec_insn[4] = {
68
+ OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D
69
+ };
70
+ static const LoongArchInsn add_vec_imm_insn[4] = {
71
+ OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU
72
+ };
73
+ static const LoongArchInsn sub_vec_insn[4] = {
74
+ OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D
75
+ };
76
+ static const LoongArchInsn sub_vec_imm_insn[4] = {
77
+ OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU
78
+ };
67
+
79
+
68
+ if (cc->get_memory_mapping) {
80
+ if (a2_is_const) {
69
+ cc->get_memory_mapping(cpu, list, errp);
81
+ int64_t value = sextract64(a2, 0, 8 << vece);
70
+ return;
82
+ if (!is_add) {
83
+ value = -value;
84
+ }
85
+
86
+ /* Try vaddi/vsubi */
87
+ if (0 <= value && value <= 0x1f) {
88
+ tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0, \
89
+ a1, value));
90
+ return;
91
+ } else if (-0x1f <= value && value < 0) {
92
+ tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0, \
93
+ a1, -value));
94
+ return;
95
+ }
96
+
97
+ /* constraint TCG_CT_CONST_VADD ensures unreachable */
98
+ g_assert_not_reached();
71
+ }
99
+ }
72
+
100
+
73
+ error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
101
+ if (is_add) {
102
+ tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2));
103
+ } else {
104
+ tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2));
105
+ }
74
+}
106
+}
75
+
107
+
76
hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
108
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
77
MemTxAttrs *attrs)
109
unsigned vecl, unsigned vece,
78
{
110
const TCGArg args[TCG_MAX_OP_ARGS],
111
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
112
}
113
tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
114
break;
115
+ case INDEX_op_add_vec:
116
+ tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], true);
117
+ break;
118
+ case INDEX_op_sub_vec:
119
+ tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], false);
120
+ break;
121
case INDEX_op_dupm_vec:
122
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
123
break;
124
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
125
case INDEX_op_dup_vec:
126
case INDEX_op_dupm_vec:
127
case INDEX_op_cmp_vec:
128
+ case INDEX_op_add_vec:
129
+ case INDEX_op_sub_vec:
130
return 1;
131
default:
132
return 0;
133
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
134
case INDEX_op_cmp_vec:
135
return C_O1_I2(w, w, wM);
136
137
+ case INDEX_op_add_vec:
138
+ case INDEX_op_sub_vec:
139
+ return C_O1_I2(w, w, wA);
140
+
141
default:
142
g_assert_not_reached();
143
}
79
--
144
--
80
2.25.1
145
2.34.1
81
82
New patch
1
From: Jiajie Chen <c@jia.je>
1
2
3
Lower the following ops:
4
5
- and_vec
6
- andc_vec
7
- or_vec
8
- orc_vec
9
- xor_vec
10
- nor_vec
11
- not_vec
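
A minimal usage sketch (placeholder offsets); note that for andc the
generic op computes dest = a & ~b, which is why the backend below has
to swap the operands of vandn:

    tcg_gen_gvec_and(MO_64, dofs, aofs, bofs, 16, 16);
    tcg_gen_gvec_andc(MO_64, dofs, aofs, bofs, 16, 16);
    tcg_gen_gvec_not(MO_64, dofs, aofs, 16, 16);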
12
13
Signed-off-by: Jiajie Chen <c@jia.je>
14
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-Id: <20230908022302.180442-7-c@jia.je>
16
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
17
---
18
tcg/loongarch64/tcg-target-con-set.h | 2 ++
19
tcg/loongarch64/tcg-target.h | 8 ++---
20
tcg/loongarch64/tcg-target.c.inc | 44 ++++++++++++++++++++++++++++
21
3 files changed, 50 insertions(+), 4 deletions(-)
22
23
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
24
index XXXXXXX..XXXXXXX 100644
25
--- a/tcg/loongarch64/tcg-target-con-set.h
26
+++ b/tcg/loongarch64/tcg-target-con-set.h
27
@@ -XXX,XX +XXX,XX @@ C_O0_I2(rZ, rZ)
28
C_O0_I2(w, r)
29
C_O1_I1(r, r)
30
C_O1_I1(w, r)
31
+C_O1_I1(w, w)
32
C_O1_I2(r, r, rC)
33
C_O1_I2(r, r, ri)
34
C_O1_I2(r, r, rI)
35
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, 0, rZ)
36
C_O1_I2(r, rZ, ri)
37
C_O1_I2(r, rZ, rJ)
38
C_O1_I2(r, rZ, rZ)
39
+C_O1_I2(w, w, w)
40
C_O1_I2(w, w, wM)
41
C_O1_I2(w, w, wA)
42
C_O1_I4(r, rZ, rJ, rZ, rZ)
43
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
44
index XXXXXXX..XXXXXXX 100644
45
--- a/tcg/loongarch64/tcg-target.h
46
+++ b/tcg/loongarch64/tcg-target.h
47
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
48
#define TCG_TARGET_HAS_v128 use_lsx_instructions
49
#define TCG_TARGET_HAS_v256 0
50
51
-#define TCG_TARGET_HAS_not_vec 0
52
+#define TCG_TARGET_HAS_not_vec 1
53
#define TCG_TARGET_HAS_neg_vec 0
54
#define TCG_TARGET_HAS_abs_vec 0
55
-#define TCG_TARGET_HAS_andc_vec 0
56
-#define TCG_TARGET_HAS_orc_vec 0
57
+#define TCG_TARGET_HAS_andc_vec 1
58
+#define TCG_TARGET_HAS_orc_vec 1
59
#define TCG_TARGET_HAS_nand_vec 0
60
-#define TCG_TARGET_HAS_nor_vec 0
61
+#define TCG_TARGET_HAS_nor_vec 1
62
#define TCG_TARGET_HAS_eqv_vec 0
63
#define TCG_TARGET_HAS_mul_vec 0
64
#define TCG_TARGET_HAS_shi_vec 0
65
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
66
index XXXXXXX..XXXXXXX 100644
67
--- a/tcg/loongarch64/tcg-target.c.inc
68
+++ b/tcg/loongarch64/tcg-target.c.inc
69
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
70
tcg_out_opc_vldx(s, a0, a1, temp);
71
}
72
break;
73
+ case INDEX_op_and_vec:
74
+ tcg_out_opc_vand_v(s, a0, a1, a2);
75
+ break;
76
+ case INDEX_op_andc_vec:
77
+ /*
78
+ * vandn vd, vj, vk: vd = vk & ~vj
79
+ * andc_vec vd, vj, vk: vd = vj & ~vk
80
+ * vj and vk are swapped
81
+ */
82
+ tcg_out_opc_vandn_v(s, a0, a2, a1);
83
+ break;
84
+ case INDEX_op_or_vec:
85
+ tcg_out_opc_vor_v(s, a0, a1, a2);
86
+ break;
87
+ case INDEX_op_orc_vec:
88
+ tcg_out_opc_vorn_v(s, a0, a1, a2);
89
+ break;
90
+ case INDEX_op_xor_vec:
91
+ tcg_out_opc_vxor_v(s, a0, a1, a2);
92
+ break;
93
+ case INDEX_op_nor_vec:
94
+ tcg_out_opc_vnor_v(s, a0, a1, a2);
95
+ break;
96
+ case INDEX_op_not_vec:
97
+ tcg_out_opc_vnor_v(s, a0, a1, a1);
98
+ break;
99
case INDEX_op_cmp_vec:
100
TCGCond cond = args[3];
101
if (const_args[2]) {
102
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
103
case INDEX_op_cmp_vec:
104
case INDEX_op_add_vec:
105
case INDEX_op_sub_vec:
106
+ case INDEX_op_and_vec:
107
+ case INDEX_op_andc_vec:
108
+ case INDEX_op_or_vec:
109
+ case INDEX_op_orc_vec:
110
+ case INDEX_op_xor_vec:
111
+ case INDEX_op_nor_vec:
112
+ case INDEX_op_not_vec:
113
return 1;
114
default:
115
return 0;
116
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
117
case INDEX_op_sub_vec:
118
return C_O1_I2(w, w, wA);
119
120
+ case INDEX_op_and_vec:
121
+ case INDEX_op_andc_vec:
122
+ case INDEX_op_or_vec:
123
+ case INDEX_op_orc_vec:
124
+ case INDEX_op_xor_vec:
125
+ case INDEX_op_nor_vec:
126
+ return C_O1_I2(w, w, w);
127
+
128
+ case INDEX_op_not_vec:
129
+ return C_O1_I1(w, w);
130
+
131
default:
132
g_assert_not_reached();
133
}
134
--
135
2.34.1
New patch
1
From: Jiajie Chen <c@jia.je>
1
2
3
Signed-off-by: Jiajie Chen <c@jia.je>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-Id: <20230908022302.180442-8-c@jia.je>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/loongarch64/tcg-target.h | 2 +-
9
tcg/loongarch64/tcg-target.c.inc | 8 ++++++++
10
2 files changed, 9 insertions(+), 1 deletion(-)
11
12
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/loongarch64/tcg-target.h
15
+++ b/tcg/loongarch64/tcg-target.h
16
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
17
#define TCG_TARGET_HAS_v256 0
18
19
#define TCG_TARGET_HAS_not_vec 1
20
-#define TCG_TARGET_HAS_neg_vec 0
21
+#define TCG_TARGET_HAS_neg_vec 1
22
#define TCG_TARGET_HAS_abs_vec 0
23
#define TCG_TARGET_HAS_andc_vec 1
24
#define TCG_TARGET_HAS_orc_vec 1
25
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
26
index XXXXXXX..XXXXXXX 100644
27
--- a/tcg/loongarch64/tcg-target.c.inc
28
+++ b/tcg/loongarch64/tcg-target.c.inc
29
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
30
[TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU},
31
};
32
LoongArchInsn insn;
33
+ static const LoongArchInsn neg_vec_insn[4] = {
34
+ OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D
35
+ };
36
37
a0 = args[0];
38
a1 = args[1];
39
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
40
case INDEX_op_sub_vec:
41
tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], false);
42
break;
43
+ case INDEX_op_neg_vec:
44
+ tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1));
45
+ break;
46
case INDEX_op_dupm_vec:
47
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
48
break;
49
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
50
case INDEX_op_xor_vec:
51
case INDEX_op_nor_vec:
52
case INDEX_op_not_vec:
53
+ case INDEX_op_neg_vec:
54
return 1;
55
default:
56
return 0;
57
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
58
return C_O1_I2(w, w, w);
59
60
case INDEX_op_not_vec:
61
+ case INDEX_op_neg_vec:
62
return C_O1_I1(w, w);
63
64
default:
65
--
66
2.34.1
New patch
1
From: Jiajie Chen <c@jia.je>
1
2
3
Signed-off-by: Jiajie Chen <c@jia.je>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-Id: <20230908022302.180442-9-c@jia.je>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
8
tcg/loongarch64/tcg-target.h | 2 +-
9
tcg/loongarch64/tcg-target.c.inc | 8 ++++++++
10
2 files changed, 9 insertions(+), 1 deletion(-)
11
12
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
13
index XXXXXXX..XXXXXXX 100644
14
--- a/tcg/loongarch64/tcg-target.h
15
+++ b/tcg/loongarch64/tcg-target.h
16
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
17
#define TCG_TARGET_HAS_nand_vec 0
18
#define TCG_TARGET_HAS_nor_vec 1
19
#define TCG_TARGET_HAS_eqv_vec 0
20
-#define TCG_TARGET_HAS_mul_vec 0
21
+#define TCG_TARGET_HAS_mul_vec 1
22
#define TCG_TARGET_HAS_shi_vec 0
23
#define TCG_TARGET_HAS_shs_vec 0
24
#define TCG_TARGET_HAS_shv_vec 0
25
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
26
index XXXXXXX..XXXXXXX 100644
27
--- a/tcg/loongarch64/tcg-target.c.inc
28
+++ b/tcg/loongarch64/tcg-target.c.inc
29
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
30
static const LoongArchInsn neg_vec_insn[4] = {
31
OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D
32
};
33
+ static const LoongArchInsn mul_vec_insn[4] = {
34
+ OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D
35
+ };
36
37
a0 = args[0];
38
a1 = args[1];
39
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
40
case INDEX_op_neg_vec:
41
tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1));
42
break;
43
+ case INDEX_op_mul_vec:
44
+ tcg_out32(s, encode_vdvjvk_insn(mul_vec_insn[vece], a0, a1, a2));
45
+ break;
46
case INDEX_op_dupm_vec:
47
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
48
break;
49
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
50
case INDEX_op_nor_vec:
51
case INDEX_op_not_vec:
52
case INDEX_op_neg_vec:
53
+ case INDEX_op_mul_vec:
54
return 1;
55
default:
56
return 0;
57
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
58
case INDEX_op_orc_vec:
59
case INDEX_op_xor_vec:
60
case INDEX_op_nor_vec:
61
+ case INDEX_op_mul_vec:
62
return C_O1_I2(w, w, w);
63
64
case INDEX_op_not_vec:
65
--
66
2.34.1
New patch
1
From: Jiajie Chen <c@jia.je>
1
2
3
Lower the following ops:
4
5
- smin_vec
6
- smax_vec
7
- umin_vec
8
- umax_vec
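
A minimal usage sketch (placeholder offsets), selecting per-element
minima/maxima at 8-bit element size:

    tcg_gen_gvec_smin(MO_8, dofs, aofs, bofs, 16, 16);
    tcg_gen_gvec_umax(MO_8, dofs, aofs, bofs, 16, 16);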
9
10
Signed-off-by: Jiajie Chen <c@jia.je>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
12
Message-Id: <20230908022302.180442-10-c@jia.je>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
14
---
15
tcg/loongarch64/tcg-target.h | 2 +-
16
tcg/loongarch64/tcg-target.c.inc | 32 ++++++++++++++++++++++++++++++++
17
2 files changed, 33 insertions(+), 1 deletion(-)
18
19
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/tcg/loongarch64/tcg-target.h
22
+++ b/tcg/loongarch64/tcg-target.h
23
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
24
#define TCG_TARGET_HAS_rots_vec 0
25
#define TCG_TARGET_HAS_rotv_vec 0
26
#define TCG_TARGET_HAS_sat_vec 0
27
-#define TCG_TARGET_HAS_minmax_vec 0
28
+#define TCG_TARGET_HAS_minmax_vec 1
29
#define TCG_TARGET_HAS_bitsel_vec 0
30
#define TCG_TARGET_HAS_cmpsel_vec 0
31
32
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
33
index XXXXXXX..XXXXXXX 100644
34
--- a/tcg/loongarch64/tcg-target.c.inc
35
+++ b/tcg/loongarch64/tcg-target.c.inc
36
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
37
static const LoongArchInsn mul_vec_insn[4] = {
38
OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D
39
};
40
+ static const LoongArchInsn smin_vec_insn[4] = {
41
+ OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D
42
+ };
43
+ static const LoongArchInsn umin_vec_insn[4] = {
44
+ OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU
45
+ };
46
+ static const LoongArchInsn smax_vec_insn[4] = {
47
+ OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D
48
+ };
49
+ static const LoongArchInsn umax_vec_insn[4] = {
50
+ OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU
51
+ };
52
53
a0 = args[0];
54
a1 = args[1];
55
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
56
case INDEX_op_mul_vec:
57
tcg_out32(s, encode_vdvjvk_insn(mul_vec_insn[vece], a0, a1, a2));
58
break;
59
+ case INDEX_op_smin_vec:
60
+ tcg_out32(s, encode_vdvjvk_insn(smin_vec_insn[vece], a0, a1, a2));
61
+ break;
62
+ case INDEX_op_smax_vec:
63
+ tcg_out32(s, encode_vdvjvk_insn(smax_vec_insn[vece], a0, a1, a2));
64
+ break;
65
+ case INDEX_op_umin_vec:
66
+ tcg_out32(s, encode_vdvjvk_insn(umin_vec_insn[vece], a0, a1, a2));
67
+ break;
68
+ case INDEX_op_umax_vec:
69
+ tcg_out32(s, encode_vdvjvk_insn(umax_vec_insn[vece], a0, a1, a2));
70
+ break;
71
case INDEX_op_dupm_vec:
72
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
73
break;
74
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
75
case INDEX_op_not_vec:
76
case INDEX_op_neg_vec:
77
case INDEX_op_mul_vec:
78
+ case INDEX_op_smin_vec:
79
+ case INDEX_op_smax_vec:
80
+ case INDEX_op_umin_vec:
81
+ case INDEX_op_umax_vec:
82
return 1;
83
default:
84
return 0;
85
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
86
case INDEX_op_xor_vec:
87
case INDEX_op_nor_vec:
88
case INDEX_op_mul_vec:
89
+ case INDEX_op_smin_vec:
90
+ case INDEX_op_smax_vec:
91
+ case INDEX_op_umin_vec:
92
+ case INDEX_op_umax_vec:
93
return C_O1_I2(w, w, w);
94
95
case INDEX_op_not_vec:
96
--
97
2.34.1
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
From: Jiajie Chen <c@jia.je>
2
2
3
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
3
Lower the following ops:
4
5
- ssadd_vec
6
- usadd_vec
7
- sssub_vec
8
- ussub_vec
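
A minimal usage sketch (placeholder offsets), saturating per element
at 16-bit element size:

    tcg_gen_gvec_ssadd(MO_16, dofs, aofs, bofs, 16, 16);
    tcg_gen_gvec_ussub(MO_16, dofs, aofs, bofs, 16, 16);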
9
10
Signed-off-by: Jiajie Chen <c@jia.je>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-Id: <20210517105140.1062037-20-f4bug@amsat.org>
12
Message-Id: <20230908022302.180442-11-c@jia.je>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
13
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
7
---
14
---
8
include/hw/core/cpu.h | 3 ---
15
tcg/loongarch64/tcg-target.h | 2 +-
9
include/hw/core/sysemu-cpu-ops.h | 5 +++++
16
tcg/loongarch64/tcg-target.c.inc | 32 ++++++++++++++++++++++++++++++++
10
hw/core/cpu-sysemu.c | 4 ++--
17
2 files changed, 33 insertions(+), 1 deletion(-)
11
target/arm/cpu.c | 2 +-
12
target/i386/cpu.c | 2 +-
13
5 files changed, 9 insertions(+), 7 deletions(-)
14
18
15
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
19
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
16
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
17
--- a/include/hw/core/cpu.h
21
--- a/tcg/loongarch64/tcg-target.h
18
+++ b/include/hw/core/cpu.h
22
+++ b/tcg/loongarch64/tcg-target.h
19
@@ -XXX,XX +XXX,XX @@ struct SysemuCPUOps;
23
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
20
* associated memory transaction attributes to use for the access.
24
#define TCG_TARGET_HAS_roti_vec 0
21
* CPUs which use memory transaction attributes should implement this
25
#define TCG_TARGET_HAS_rots_vec 0
22
* instead of get_phys_page_debug.
26
#define TCG_TARGET_HAS_rotv_vec 0
23
- * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
27
-#define TCG_TARGET_HAS_sat_vec 0
24
- * a memory access with the specified memory transaction attributes.
28
+#define TCG_TARGET_HAS_sat_vec 1
25
* @gdb_read_register: Callback for letting GDB read a register.
29
#define TCG_TARGET_HAS_minmax_vec 1
26
* @gdb_write_register: Callback for letting GDB write a register.
30
#define TCG_TARGET_HAS_bitsel_vec 0
27
* @gdb_num_core_regs: Number of core registers accessible to GDB.
31
#define TCG_TARGET_HAS_cmpsel_vec 0
28
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
32
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
29
hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
30
hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
31
MemTxAttrs *attrs);
32
- int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
33
int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
34
int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
35
36
diff --git a/include/hw/core/sysemu-cpu-ops.h b/include/hw/core/sysemu-cpu-ops.h
37
index XXXXXXX..XXXXXXX 100644
33
index XXXXXXX..XXXXXXX 100644
38
--- a/include/hw/core/sysemu-cpu-ops.h
34
--- a/tcg/loongarch64/tcg-target.c.inc
39
+++ b/include/hw/core/sysemu-cpu-ops.h
35
+++ b/tcg/loongarch64/tcg-target.c.inc
40
@@ -XXX,XX +XXX,XX @@
36
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
41
* struct SysemuCPUOps: System operations specific to a CPU class
37
static const LoongArchInsn umax_vec_insn[4] = {
42
*/
38
OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU
43
typedef struct SysemuCPUOps {
39
};
44
+ /**
40
+ static const LoongArchInsn ssadd_vec_insn[4] = {
45
+ * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
41
+ OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D
46
+ * a memory access with the specified memory transaction attributes.
42
+ };
47
+ */
43
+ static const LoongArchInsn usadd_vec_insn[4] = {
48
+ int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
44
+ OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU
49
/**
45
+ };
50
* @get_crash_info: Callback for reporting guest crash information in
46
+ static const LoongArchInsn sssub_vec_insn[4] = {
51
* GUEST_PANICKED events.
47
+ OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D
52
diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c
48
+ };
53
index XXXXXXX..XXXXXXX 100644
49
+ static const LoongArchInsn ussub_vec_insn[4] = {
54
--- a/hw/core/cpu-sysemu.c
50
+ OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU
55
+++ b/hw/core/cpu-sysemu.c
51
+ };
56
@@ -XXX,XX +XXX,XX @@ int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
52
57
CPUClass *cc = CPU_GET_CLASS(cpu);
53
a0 = args[0];
58
int ret = 0;
54
a1 = args[1];
59
55
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
60
- if (cc->asidx_from_attrs) {
56
case INDEX_op_umax_vec:
61
- ret = cc->asidx_from_attrs(cpu, attrs);
57
tcg_out32(s, encode_vdvjvk_insn(umax_vec_insn[vece], a0, a1, a2));
62
+ if (cc->sysemu_ops->asidx_from_attrs) {
58
break;
63
+ ret = cc->sysemu_ops->asidx_from_attrs(cpu, attrs);
59
+ case INDEX_op_ssadd_vec:
64
assert(ret < cpu->num_ases && ret >= 0);
60
+ tcg_out32(s, encode_vdvjvk_insn(ssadd_vec_insn[vece], a0, a1, a2));
65
}
61
+ break;
66
return ret;
62
+ case INDEX_op_usadd_vec:
67
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
63
+ tcg_out32(s, encode_vdvjvk_insn(usadd_vec_insn[vece], a0, a1, a2));
68
index XXXXXXX..XXXXXXX 100644
64
+ break;
69
--- a/target/arm/cpu.c
65
+ case INDEX_op_sssub_vec:
70
+++ b/target/arm/cpu.c
66
+ tcg_out32(s, encode_vdvjvk_insn(sssub_vec_insn[vece], a0, a1, a2));
71
@@ -XXX,XX +XXX,XX @@ static gchar *arm_gdb_arch_name(CPUState *cs)
67
+ break;
72
#include "hw/core/sysemu-cpu-ops.h"
68
+ case INDEX_op_ussub_vec:
73
69
+ tcg_out32(s, encode_vdvjvk_insn(ussub_vec_insn[vece], a0, a1, a2));
74
static const struct SysemuCPUOps arm_sysemu_ops = {
70
+ break;
75
+ .asidx_from_attrs = arm_asidx_from_attrs,
71
case INDEX_op_dupm_vec:
76
.write_elf32_note = arm_cpu_write_elf32_note,
72
tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
77
.write_elf64_note = arm_cpu_write_elf64_note,
73
break;
78
.virtio_is_big_endian = arm_cpu_virtio_is_big_endian,
74
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
79
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
75
case INDEX_op_smax_vec:
80
cc->gdb_write_register = arm_cpu_gdb_write_register;
76
case INDEX_op_umin_vec:
81
#ifndef CONFIG_USER_ONLY
77
case INDEX_op_umax_vec:
82
cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
78
+ case INDEX_op_ssadd_vec:
83
- cc->asidx_from_attrs = arm_asidx_from_attrs;
79
+ case INDEX_op_usadd_vec:
84
cc->sysemu_ops = &arm_sysemu_ops;
80
+ case INDEX_op_sssub_vec:
85
#endif
81
+ case INDEX_op_ussub_vec:
86
cc->gdb_num_core_regs = 26;
82
return 1;
87
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
83
default:
88
index XXXXXXX..XXXXXXX 100644
84
return 0;
89
--- a/target/i386/cpu.c
85
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
90
+++ b/target/i386/cpu.c
86
case INDEX_op_smax_vec:
91
@@ -XXX,XX +XXX,XX @@ static Property x86_cpu_properties[] = {
87
case INDEX_op_umin_vec:
92
#include "hw/core/sysemu-cpu-ops.h"
88
case INDEX_op_umax_vec:
93
89
+ case INDEX_op_ssadd_vec:
94
static const struct SysemuCPUOps i386_sysemu_ops = {
90
+ case INDEX_op_usadd_vec:
95
+ .asidx_from_attrs = x86_asidx_from_attrs,
91
+ case INDEX_op_sssub_vec:
96
.get_crash_info = x86_cpu_get_crash_info,
92
+ case INDEX_op_ussub_vec:
97
.write_elf32_note = x86_cpu_write_elf32_note,
93
return C_O1_I2(w, w, w);
98
.write_elf64_note = x86_cpu_write_elf64_note,
94
99
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
95
case INDEX_op_not_vec:
100
cc->get_paging_enabled = x86_cpu_get_paging_enabled;
101
102
#ifndef CONFIG_USER_ONLY
103
- cc->asidx_from_attrs = x86_asidx_from_attrs;
104
cc->get_memory_mapping = x86_cpu_get_memory_mapping;
105
cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
106
cc->sysemu_ops = &i386_sysemu_ops;
107
--
96
--
108
2.25.1
97
2.34.1
109
110
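[Editor's note] Saturating ops clamp at the type bounds instead of wrapping, which is what the VSADD/VSSUB family provides per lane. A minimal scalar reference for the vece = MO_8 signed case (illustrative only, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Reference for ssadd at MO_8: compute in a wider type, then clamp,
     * matching the per-lane behaviour the lowering relies on. */
    static int8_t ssadd8(int8_t a, int8_t b)
    {
        int r = a + b;
        if (r > INT8_MAX) {
            return INT8_MAX;
        }
        if (r < INT8_MIN) {
            return INT8_MIN;
        }
        return (int8_t)r;
    }

    int main(void)
    {
        printf("%d\n", ssadd8(100, 100));   /* 127, not the wrapped -56 */
        return 0;
    }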
From: Jiajie Chen <c@jia.je>

Lower the following ops:

- shlv_vec
- shrv_vec
- sarv_vec

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-12-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target.h     |  2 +-
 tcg/loongarch64/tcg-target.c.inc | 24 ++++++++++++++++++++++++
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
 #define TCG_TARGET_HAS_mul_vec 1
 #define TCG_TARGET_HAS_shi_vec 0
 #define TCG_TARGET_HAS_shs_vec 0
-#define TCG_TARGET_HAS_shv_vec 0
+#define TCG_TARGET_HAS_shv_vec 1
 #define TCG_TARGET_HAS_roti_vec 0
 #define TCG_TARGET_HAS_rots_vec 0
 #define TCG_TARGET_HAS_rotv_vec 0

diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     static const LoongArchInsn ussub_vec_insn[4] = {
         OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU
     };
+    static const LoongArchInsn shlv_vec_insn[4] = {
+        OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D
+    };
+    static const LoongArchInsn shrv_vec_insn[4] = {
+        OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D
+    };
+    static const LoongArchInsn sarv_vec_insn[4] = {
+        OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D
+    };

     a0 = args[0];
     a1 = args[1];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_ussub_vec:
         tcg_out32(s, encode_vdvjvk_insn(ussub_vec_insn[vece], a0, a1, a2));
         break;
+    case INDEX_op_shlv_vec:
+        tcg_out32(s, encode_vdvjvk_insn(shlv_vec_insn[vece], a0, a1, a2));
+        break;
+    case INDEX_op_shrv_vec:
+        tcg_out32(s, encode_vdvjvk_insn(shrv_vec_insn[vece], a0, a1, a2));
+        break;
+    case INDEX_op_sarv_vec:
+        tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2));
+        break;
     case INDEX_op_dupm_vec:
         tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
         break;
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_usadd_vec:
     case INDEX_op_sssub_vec:
     case INDEX_op_ussub_vec:
+    case INDEX_op_shlv_vec:
+    case INDEX_op_shrv_vec:
+    case INDEX_op_sarv_vec:
         return 1;
     default:
         return 0;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_usadd_vec:
     case INDEX_op_sssub_vec:
     case INDEX_op_ussub_vec:
+    case INDEX_op_shlv_vec:
+    case INDEX_op_shrv_vec:
+    case INDEX_op_sarv_vec:
         return C_O1_I2(w, w, w);

     case INDEX_op_not_vec:
--
2.34.1
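[Editor's note] A one-instruction lowering like the above only works because both sides agree on out-of-range counts: per-lane shift counts are reduced to the lane width. A scalar sketch of that assumption at MO_8 (illustrative, under the assumption that both TCG's shv ops and VSLL/VSRL/VSRA use only the low log2(width) bits of each lane's count):

    #include <stdint.h>
    #include <stdio.h>

    /* Reference for shlv at MO_8: each lane shifts by its own count,
     * masked to the lane width. */
    static uint8_t shlv8(uint8_t a, uint8_t count)
    {
        return (uint8_t)(a << (count & 7));
    }

    int main(void)
    {
        printf("%#x\n", (unsigned)shlv8(0x01, 3));    /* 0x08 */
        printf("%#x\n", (unsigned)shlv8(0x01, 11));   /* 11 & 7 = 3, also 0x08 */
        return 0;
    }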
From: Jiajie Chen <c@jia.je>

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-13-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target-con-set.h |  1 +
 tcg/loongarch64/tcg-target.h         |  2 +-
 tcg/loongarch64/tcg-target.c.inc     | 11 ++++++++++-
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O1_I2(r, rZ, rZ)
 C_O1_I2(w, w, w)
 C_O1_I2(w, w, wM)
 C_O1_I2(w, w, wA)
+C_O1_I3(w, w, w, w)
 C_O1_I4(r, rZ, rJ, rZ, rZ)
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
 #define TCG_TARGET_HAS_rotv_vec 0
 #define TCG_TARGET_HAS_sat_vec 1
 #define TCG_TARGET_HAS_minmax_vec 1
-#define TCG_TARGET_HAS_bitsel_vec 0
+#define TCG_TARGET_HAS_bitsel_vec 1
 #define TCG_TARGET_HAS_cmpsel_vec 0

 #define TCG_TARGET_DEFAULT_MO (0)
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                            const int const_args[TCG_MAX_OP_ARGS])
 {
     TCGType type = vecl + TCG_TYPE_V64;
-    TCGArg a0, a1, a2;
+    TCGArg a0, a1, a2, a3;
     TCGReg temp = TCG_REG_TMP0;
     TCGReg temp_vec = TCG_VEC_TMP0;

@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     a0 = args[0];
     a1 = args[1];
     a2 = args[2];
+    a3 = args[3];

     /* Currently only supports V128 */
     tcg_debug_assert(type == TCG_TYPE_V128);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_sarv_vec:
         tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2));
         break;
+    case INDEX_op_bitsel_vec:
+        /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
+        tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
+        break;
     case INDEX_op_dupm_vec:
         tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
         break;
@@ -XXX,XX +XXX,XX @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_shlv_vec:
     case INDEX_op_shrv_vec:
     case INDEX_op_sarv_vec:
+    case INDEX_op_bitsel_vec:
         return 1;
     default:
         return 0;
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_neg_vec:
         return C_O1_I1(w, w);

+    case INDEX_op_bitsel_vec:
+        return C_O1_I3(w, w, w, w);
+
     default:
         g_assert_not_reached();
--
2.34.1
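[Editor's note] The operand swap in the bitsel case follows from lining up the two definitions: TCG's bitsel_vec computes d = (b & a) | (c & ~a) with a as the selector, while vbitsel.v selects from its first two vector sources under its last. A scalar sketch of the selection semantics (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Reference for bitsel_vec: 'a' picks, bit by bit, between 'b'
     * (where the mask bit is set) and 'c' (where it is clear). */
    static uint64_t bitsel(uint64_t a, uint64_t b, uint64_t c)
    {
        return (b & a) | (c & ~a);
    }

    int main(void)
    {
        /* high byte of the mask selects from b, low byte from c */
        printf("%#llx\n", (unsigned long long)bitsel(0xff00, 0x1234, 0xabcd));
        /* prints 0x12cd */
        return 0;
    }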
From: Jiajie Chen <c@jia.je>

Lower the following ops:

- shli_vec
- shri_vec
- sari_vec

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-14-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target.h     |  2 +-
 tcg/loongarch64/tcg-target.c.inc | 21 +++++++++++++++++++++
 2 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
 #define TCG_TARGET_HAS_nor_vec 1
 #define TCG_TARGET_HAS_eqv_vec 0
 #define TCG_TARGET_HAS_mul_vec 1
-#define TCG_TARGET_HAS_shi_vec 0
+#define TCG_TARGET_HAS_shi_vec 1
 #define TCG_TARGET_HAS_shs_vec 0
 #define TCG_TARGET_HAS_shv_vec 1
 #define TCG_TARGET_HAS_roti_vec 0

diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     static const LoongArchInsn sarv_vec_insn[4] = {
         OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D
     };
+    static const LoongArchInsn shli_vec_insn[4] = {
+        OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D
+    };
+    static const LoongArchInsn shri_vec_insn[4] = {
+        OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D
+    };
+    static const LoongArchInsn sari_vec_insn[4] = {
+        OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D
+    };

     a0 = args[0];
     a1 = args[1];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_sarv_vec:
         tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2));
         break;
+    case INDEX_op_shli_vec:
+        tcg_out32(s, encode_vdvjuk3_insn(shli_vec_insn[vece], a0, a1, a2));
+        break;
+    case INDEX_op_shri_vec:
+        tcg_out32(s, encode_vdvjuk3_insn(shri_vec_insn[vece], a0, a1, a2));
+        break;
+    case INDEX_op_sari_vec:
+        tcg_out32(s, encode_vdvjuk3_insn(sari_vec_insn[vece], a0, a1, a2));
+        break;
     case INDEX_op_bitsel_vec:
         /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
         tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)

     case INDEX_op_not_vec:
     case INDEX_op_neg_vec:
+    case INDEX_op_shli_vec:
+    case INDEX_op_shri_vec:
+    case INDEX_op_sari_vec:
         return C_O1_I1(w, w);

     case INDEX_op_bitsel_vec:
--
2.34.1
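[Editor's note] The immediate forms need separate logical (VSRLI) and arithmetic (VSRAI) right shifts: the former inserts zeros, the latter replicates the sign bit. A portable scalar reference at MO_8 (illustrative only; written without relying on C's implementation-defined signed right shift):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t shri8(uint8_t a, unsigned imm)   /* logical */
    {
        return a >> imm;
    }

    static uint8_t sari8(uint8_t a, unsigned imm)   /* arithmetic, imm in 0..7 */
    {
        uint8_t logical = a >> imm;
        uint8_t sign_fill = (a & 0x80) ? (uint8_t)(0xff << (8 - imm)) : 0;
        return logical | sign_fill;
    }

    int main(void)
    {
        printf("%#x\n", (unsigned)shri8(0x80, 4));  /* 0x08 */
        printf("%#x\n", (unsigned)sari8(0x80, 4));  /* 0xf8: sign bits shifted in */
        return 0;
    }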
From: Jiajie Chen <c@jia.je>

Lower the following ops:

- rotrv_vec
- rotlv_vec

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-15-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target.h     |  2 +-
 tcg/loongarch64/tcg-target.c.inc | 14 ++++++++++++++
 2 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
 #define TCG_TARGET_HAS_shv_vec 1
 #define TCG_TARGET_HAS_roti_vec 0
 #define TCG_TARGET_HAS_rots_vec 0
-#define TCG_TARGET_HAS_rotv_vec 0
+#define TCG_TARGET_HAS_rotv_vec 1
 #define TCG_TARGET_HAS_sat_vec 1
 #define TCG_TARGET_HAS_minmax_vec 1
 #define TCG_TARGET_HAS_bitsel_vec 1

diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     static const LoongArchInsn sari_vec_insn[4] = {
         OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D
     };
+    static const LoongArchInsn rotrv_vec_insn[4] = {
+        OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D
+    };

     a0 = args[0];
     a1 = args[1];
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_sari_vec:
         tcg_out32(s, encode_vdvjuk3_insn(sari_vec_insn[vece], a0, a1, a2));
         break;
+    case INDEX_op_rotrv_vec:
+        tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1, a2));
+        break;
+    case INDEX_op_rotlv_vec:
+        /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */
+        tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], temp_vec, a2));
+        tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1,
+                                        temp_vec));
+        break;
     case INDEX_op_bitsel_vec:
         /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
         tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_shlv_vec:
     case INDEX_op_shrv_vec:
     case INDEX_op_sarv_vec:
+    case INDEX_op_rotrv_vec:
+    case INDEX_op_rotlv_vec:
         return C_O1_I2(w, w, w);

     case INDEX_op_not_vec:
--
2.34.1
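[Editor's note] The rotlv lowering leans on the identity rotl(x, n) == rotr(x, -n) once the count is reduced modulo the lane width, so negating the count vector is all that is needed before reusing VROTR. A scalar demonstration at MO_8 (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Only the low log2(width) bits of the count matter, so the mask
     * also keeps the shifts below well defined. */
    static uint8_t rotr8(uint8_t x, unsigned n)
    {
        n &= 7;
        return (uint8_t)((x >> n) | (x << ((8 - n) & 7)));
    }

    static uint8_t rotl8(uint8_t x, unsigned n)
    {
        return rotr8(x, 0u - n);    /* -n mod 8 == 8 - (n mod 8) */
    }

    int main(void)
    {
        printf("%#x\n", (unsigned)rotl8(0x81, 1));  /* 0x03 */
        printf("%#x\n", (unsigned)rotr8(0x81, 7));  /* same: 0x03 */
        return 0;
    }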
From: Jiajie Chen <c@jia.je>

Signed-off-by: Jiajie Chen <c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20230908022302.180442-16-c@jia.je>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target.h     |  2 +-
 tcg/loongarch64/tcg-target.c.inc | 21 +++++++++++++++++++++
 2 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
 #define TCG_TARGET_HAS_shi_vec 1
 #define TCG_TARGET_HAS_shs_vec 0
 #define TCG_TARGET_HAS_shv_vec 1
-#define TCG_TARGET_HAS_roti_vec 0
+#define TCG_TARGET_HAS_roti_vec 1
 #define TCG_TARGET_HAS_rots_vec 0
 #define TCG_TARGET_HAS_rotv_vec 1
 #define TCG_TARGET_HAS_sat_vec 1

diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1,
                                         temp_vec));
         break;
+    case INDEX_op_rotli_vec:
+        /* rotli_vec a1, a2 = rotri_vec a1, -a2 */
+        a2 = extract32(-a2, 0, 3 + vece);
+        switch (vece) {
+        case MO_8:
+            tcg_out_opc_vrotri_b(s, a0, a1, a2);
+            break;
+        case MO_16:
+            tcg_out_opc_vrotri_h(s, a0, a1, a2);
+            break;
+        case MO_32:
+            tcg_out_opc_vrotri_w(s, a0, a1, a2);
+            break;
+        case MO_64:
+            tcg_out_opc_vrotri_d(s, a0, a1, a2);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+        break;
     case INDEX_op_bitsel_vec:
         /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
         tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_shli_vec:
     case INDEX_op_shri_vec:
     case INDEX_op_sari_vec:
+    case INDEX_op_rotli_vec:
         return C_O1_I1(w, w);

     case INDEX_op_bitsel_vec:
--
2.34.1
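[Editor's note] For the immediate form, the same rotate identity is folded at translation time: extract32(-a2, 0, 3 + vece) takes the low log2(width) bits of the negated left-rotate amount, yielding the equivalent right-rotate immediate (a 3-bit field for MO_8 lanes up to 6 bits for MO_64). A small sketch of the computation (illustrative only; extract32 here is reimplemented as the mask it performs):

    #include <stdio.h>

    static unsigned rot_left_to_right(unsigned left_imm, unsigned vece)
    {
        unsigned width_log2 = 3 + vece;             /* 3..6 for B/H/W/D */
        return (-left_imm) & ((1u << width_log2) - 1);
    }

    int main(void)
    {
        /* rotl by 3 on 8-bit lanes == rotr by 5 */
        printf("%u\n", rot_left_to_right(3, 0));    /* 5 */
        /* rotl by 3 on 64-bit lanes == rotr by 61 */
        printf("%u\n", rot_left_to_right(3, 3));    /* 61 */
        return 0;
    }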
From: Jiajie Chen <c@jia.je>

If LSX is available, use LSX instructions to implement 128-bit load &
store when MO_128 is required, otherwise use two 64-bit loads & stores.

Signed-off-by: Jiajie Chen <c@jia.je>
Message-Id: <20230908022302.180442-17-c@jia.je>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/loongarch64/tcg-target-con-set.h |  2 +
 tcg/loongarch64/tcg-target.h         |  2 +-
 tcg/loongarch64/tcg-target.c.inc     | 63 ++++++++++++++++++++++++++++
 3 files changed, 66 insertions(+), 1 deletion(-)

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -XXX,XX +XXX,XX @@ C_O0_I1(r)
 C_O0_I2(rZ, r)
 C_O0_I2(rZ, rZ)
 C_O0_I2(w, r)
+C_O0_I3(r, r, r)
 C_O1_I1(r, r)
 C_O1_I1(w, r)
 C_O1_I1(w, w)
@@ -XXX,XX +XXX,XX @@ C_O1_I2(w, w, wM)
 C_O1_I2(w, w, wA)
 C_O1_I3(w, w, w, w)
 C_O1_I4(r, rZ, rJ, rZ, rZ)
+C_O2_I1(r, r, r)
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -XXX,XX +XXX,XX @@ extern bool use_lsx_instructions;
 #define TCG_TARGET_HAS_muluh_i64 1
 #define TCG_TARGET_HAS_mulsh_i64 1

-#define TCG_TARGET_HAS_qemu_ldst_i128 0
+#define TCG_TARGET_HAS_qemu_ldst_i128 use_lsx_instructions

 #define TCG_TARGET_HAS_v64 0
 #define TCG_TARGET_HAS_v128 use_lsx_instructions
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
     }
 }

+static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi,
+                                   TCGReg addr_reg, MemOpIdx oi, bool is_ld)
+{
+    TCGLabelQemuLdst *ldst;
+    HostAddress h;
+
+    ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
+
+    if (h.aa.atom == MO_128) {
+        /*
+         * Use VLDX/VSTX when 128-bit atomicity is required.
+         * If address is aligned to 16-bytes, the 128-bit load/store is atomic.
+         */
+        if (is_ld) {
+            tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index);
+            tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0);
+            tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1);
+        } else {
+            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0);
+            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1);
+            tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index);
+        }
+    } else {
+        /* Otherwise use a pair of LD/ST. */
+        TCGReg base = h.base;
+        if (h.index != TCG_REG_ZERO) {
+            base = TCG_REG_TMP0;
+            tcg_out_opc_add_d(s, base, h.base, h.index);
+        }
+        if (is_ld) {
+            tcg_out_opc_ld_d(s, data_lo, base, 0);
+            tcg_out_opc_ld_d(s, data_hi, base, 8);
+        } else {
+            tcg_out_opc_st_d(s, data_lo, base, 0);
+            tcg_out_opc_st_d(s, data_hi, base, 8);
+        }
+    }
+
+    if (ldst) {
+        ldst->type = TCG_TYPE_I128;
+        ldst->datalo_reg = data_lo;
+        ldst->datahi_reg = data_hi;
+        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+    }
+}
+
 /*
  * Entry-points
  */
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     TCGArg a0 = args[0];
     TCGArg a1 = args[1];
     TCGArg a2 = args[2];
+    TCGArg a3 = args[3];
     int c2 = const_args[2];

     switch (opc) {
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_qemu_ld_a64_i64:
         tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
         break;
+    case INDEX_op_qemu_ld_a32_i128:
+    case INDEX_op_qemu_ld_a64_i128:
+        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
+        break;
     case INDEX_op_qemu_st_a32_i32:
     case INDEX_op_qemu_st_a64_i32:
         tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
@@ -XXX,XX +XXX,XX @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_qemu_st_a64_i64:
         tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
         break;
+    case INDEX_op_qemu_st_a32_i128:
+    case INDEX_op_qemu_st_a64_i128:
+        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
+        break;

     case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
     case INDEX_op_mov_i64:
@@ -XXX,XX +XXX,XX @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_qemu_st_a64_i64:
         return C_O0_I2(rZ, r);

+    case INDEX_op_qemu_ld_a32_i128:
+    case INDEX_op_qemu_ld_a64_i128:
+        return C_O2_I1(r, r, r);
+
+    case INDEX_op_qemu_st_a32_i128:
+    case INDEX_op_qemu_st_a64_i128:
+        return C_O0_I3(r, r, r);
+
     case INDEX_op_brcond_i32:
     case INDEX_op_brcond_i64:
         return C_O0_I2(rZ, rZ);
--
2.34.1
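[Editor's note] Both paths above hand the 128-bit value around as a lo/hi pair of 64-bit registers; the vector path does one 16-byte access and moves the halves through TCG_VEC_TMP0, while the fallback issues two 8-byte accesses at offsets 0 and 8. A reference model of that lo/hi split on a little-endian host (illustrative only; names are made up, not QEMU APIs):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    typedef struct { uint64_t lo, hi; } Int128Pair;

    /* Mirrors the non-atomic fallback: halves read from offsets 0 and 8. */
    static Int128Pair load_i128_pair(const void *addr)
    {
        Int128Pair v;
        memcpy(&v.lo, (const char *)addr + 0, 8);
        memcpy(&v.hi, (const char *)addr + 8, 8);
        return v;
    }

    int main(void)
    {
        uint8_t buf[16];
        for (int i = 0; i < 16; i++) {
            buf[i] = (uint8_t)i;
        }
        Int128Pair v = load_i128_pair(buf);
        printf("lo=%016llx hi=%016llx\n",
               (unsigned long long)v.lo, (unsigned long long)v.hi);
        return 0;
    }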
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Song Gao <gaosong@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
Message-Id: <20230831030904.1194667-2-richard.henderson@linaro.org>
---
 accel/tcg/tcg-runtime.h          |  25 ++++++
 include/tcg/tcg-op-gvec-common.h |   6 ++
 accel/tcg/tcg-runtime-gvec.c     |  26 ++++++
 tcg/tcg-op-gvec.c                | 149 +++++++++++++++++++++++++++++++
 4 files changed, 206 insertions(+)

diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-runtime.h
+++ b/accel/tcg/tcg-runtime.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

+DEF_HELPER_FLAGS_4(gvec_eqs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_eqs16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_eqs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_eqs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_lts8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_lts16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_lts32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_lts64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_les8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_les16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_les32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_les64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_ltus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_ltus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_ltus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_ltus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_leus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_leus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_leus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_leus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
 DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
diff --git a/include/tcg/tcg-op-gvec-common.h b/include/tcg/tcg-op-gvec-common.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg-op-gvec-common.h
+++ b/include/tcg/tcg-op-gvec-common.h
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
 void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
                       uint32_t aofs, uint32_t bofs,
                       uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_cmpi(TCGCond cond, unsigned vece, uint32_t dofs,
+                       uint32_t aofs, int64_t c,
+                       uint32_t oprsz, uint32_t maxsz);
+void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
+                       uint32_t aofs, TCGv_i64 c,
+                       uint32_t oprsz, uint32_t maxsz);

 /*
  * Perform vector bit select: d = (b & a) | (c & ~a).
diff --git a/accel/tcg/tcg-runtime-gvec.c b/accel/tcg/tcg-runtime-gvec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-runtime-gvec.c
+++ b/accel/tcg/tcg-runtime-gvec.c
@@ -XXX,XX +XXX,XX @@ DO_CMP2(64)
 #undef DO_CMP1
 #undef DO_CMP2

+#define DO_CMP1(NAME, TYPE, OP)                                            \
+void HELPER(NAME)(void *d, void *a, uint64_t b64, uint32_t desc)           \
+{                                                                          \
+    intptr_t oprsz = simd_oprsz(desc);                                     \
+    TYPE inv = simd_data(desc), b = b64;                                   \
+    for (intptr_t i = 0; i < oprsz; i += sizeof(TYPE)) {                   \
+        *(TYPE *)(d + i) = -((*(TYPE *)(a + i) OP b) ^ inv);               \
+    }                                                                      \
+    clear_high(d, oprsz, desc);                                            \
+}
+
+#define DO_CMP2(SZ) \
+    DO_CMP1(gvec_eqs##SZ, uint##SZ##_t, ==)    \
+    DO_CMP1(gvec_lts##SZ, int##SZ##_t, <)      \
+    DO_CMP1(gvec_les##SZ, int##SZ##_t, <=)     \
+    DO_CMP1(gvec_ltus##SZ, uint##SZ##_t, <)    \
+    DO_CMP1(gvec_leus##SZ, uint##SZ##_t, <=)
+
+DO_CMP2(8)
+DO_CMP2(16)
+DO_CMP2(32)
+DO_CMP2(64)
+
+#undef DO_CMP1
+#undef DO_CMP2
+
 void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc)
 {
     intptr_t oprsz = simd_oprsz(desc);
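[Editor's note] Before the tcg-op-gvec.c hunk that follows, it is worth unpacking what one DO_CMP1 instantiation computes per element: the comparison yields 0 or 1, 'inv' (0 or 1, taken from simd_data) optionally flips it, and negation widens that bit into the all-zeros or all-ones mask gvec expects. This is how conditions without a helper of their own, such as TCG_COND_NE, reuse the eq/lt/le helper set via an inverted condition. A scalar sketch (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Per-element result of the eqs8 helper: -((a == b) ^ inv). */
    static uint8_t cmp_mask_eq8(uint8_t a, uint8_t b, uint8_t inv)
    {
        return (uint8_t)-((a == b) ^ inv);
    }

    int main(void)
    {
        printf("%#x\n", (unsigned)cmp_mask_eq8(3, 3, 0)); /* 0xff: equal      */
        printf("%#x\n", (unsigned)cmp_mask_eq8(3, 4, 0)); /* 0x00: not equal  */
        printf("%#x\n", (unsigned)cmp_mask_eq8(3, 4, 1)); /* 0xff: NE via inv */
        return 0;
    }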
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -XXX,XX +XXX,XX @@ void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
     }
 }

+static void expand_cmps_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
+                            uint32_t oprsz, uint32_t tysz, TCGType type,
+                            TCGCond cond, TCGv_vec c)
+{
+    TCGv_vec t0 = tcg_temp_new_vec(type);
+    TCGv_vec t1 = tcg_temp_new_vec(type);
+    uint32_t i;
+
+    for (i = 0; i < oprsz; i += tysz) {
+        tcg_gen_ld_vec(t1, cpu_env, aofs + i);
+        tcg_gen_cmp_vec(cond, vece, t0, t1, c);
+        tcg_gen_st_vec(t0, cpu_env, dofs + i);
+    }
+}
+
+void tcg_gen_gvec_cmps(TCGCond cond, unsigned vece, uint32_t dofs,
+                       uint32_t aofs, TCGv_i64 c,
+                       uint32_t oprsz, uint32_t maxsz)
+{
+    static const TCGOpcode cmp_list[] = { INDEX_op_cmp_vec, 0 };
+    static gen_helper_gvec_2i * const eq_fn[4] = {
+        gen_helper_gvec_eqs8, gen_helper_gvec_eqs16,
+        gen_helper_gvec_eqs32, gen_helper_gvec_eqs64
+    };
+    static gen_helper_gvec_2i * const lt_fn[4] = {
+        gen_helper_gvec_lts8, gen_helper_gvec_lts16,
+        gen_helper_gvec_lts32, gen_helper_gvec_lts64
+    };
+    static gen_helper_gvec_2i * const le_fn[4] = {
+        gen_helper_gvec_les8, gen_helper_gvec_les16,
+        gen_helper_gvec_les32, gen_helper_gvec_les64
+    };
+    static gen_helper_gvec_2i * const ltu_fn[4] = {
+        gen_helper_gvec_ltus8, gen_helper_gvec_ltus16,
+        gen_helper_gvec_ltus32, gen_helper_gvec_ltus64
+    };
+    static gen_helper_gvec_2i * const leu_fn[4] = {
+        gen_helper_gvec_leus8, gen_helper_gvec_leus16,
+        gen_helper_gvec_leus32, gen_helper_gvec_leus64
+    };
+    static gen_helper_gvec_2i * const * const fns[16] = {
+        [TCG_COND_EQ] = eq_fn,
+        [TCG_COND_LT] = lt_fn,
+        [TCG_COND_LE] = le_fn,
+        [TCG_COND_LTU] = ltu_fn,
+        [TCG_COND_LEU] = leu_fn,
+    };
+
+    TCGType type;
+
+    check_size_align(oprsz, maxsz, dofs | aofs);
+    check_overlap_2(dofs, aofs, maxsz);
+
+    if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
+        do_dup(MO_8, dofs, oprsz, maxsz,
+               NULL, NULL, -(cond == TCG_COND_ALWAYS));
+        return;
+    }
+
+    /*
+     * Implement inline with a vector type, if possible.
+     * Prefer integer when 64-bit host and 64-bit comparison.
+     */
+    type = choose_vector_type(cmp_list, vece, oprsz,
+                              TCG_TARGET_REG_BITS == 64 && vece == MO_64);
+    if (type != 0) {
+        const TCGOpcode *hold_list = tcg_swap_vecop_list(cmp_list);
+        TCGv_vec t_vec = tcg_temp_new_vec(type);
+        uint32_t some;
+
+        tcg_gen_dup_i64_vec(vece, t_vec, c);
+        switch (type) {
+        case TCG_TYPE_V256:
+            some = QEMU_ALIGN_DOWN(oprsz, 32);
+            expand_cmps_vec(vece, dofs, aofs, some, 32,
+                            TCG_TYPE_V256, cond, t_vec);
+            aofs += some;
+            dofs += some;
+            oprsz -= some;
+            maxsz -= some;
+            /* fallthru */
+
+        case TCG_TYPE_V128:
+            some = QEMU_ALIGN_DOWN(oprsz, 16);
+            expand_cmps_vec(vece, dofs, aofs, some, 16,
+                            TCG_TYPE_V128, cond, t_vec);
+            break;
+
+        case TCG_TYPE_V64:
+            some = QEMU_ALIGN_DOWN(oprsz, 8);
+            expand_cmps_vec(vece, dofs, aofs, some, 8,
+                            TCG_TYPE_V64, cond, t_vec);
+            break;
+
+        default:
+            g_assert_not_reached();
+        }
+        tcg_temp_free_vec(t_vec);
+        tcg_swap_vecop_list(hold_list);
+    } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
+        TCGv_i64 t0 = tcg_temp_ebb_new_i64();
+        uint32_t i;
+
+        for (i = 0; i < oprsz; i += 8) {
+            tcg_gen_ld_i64(t0, cpu_env, aofs + i);
+            tcg_gen_negsetcond_i64(cond, t0, t0, c);
+            tcg_gen_st_i64(t0, cpu_env, dofs + i);
+        }
+        tcg_temp_free_i64(t0);
+    } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
+        TCGv_i32 t0 = tcg_temp_ebb_new_i32();
+        TCGv_i32 t1 = tcg_temp_ebb_new_i32();
+        uint32_t i;
+
+        tcg_gen_extrl_i64_i32(t1, c);
+        for (i = 0; i < oprsz; i += 8) {
+            tcg_gen_ld_i32(t0, cpu_env, aofs + i);
+            tcg_gen_negsetcond_i32(cond, t0, t0, t1);
+            tcg_gen_st_i32(t0, cpu_env, dofs + i);
+        }
+        tcg_temp_free_i32(t0);
+        tcg_temp_free_i32(t1);
+    } else {
+        gen_helper_gvec_2i * const *fn = fns[cond];
+        bool inv = false;
+
+        if (fn == NULL) {
+            cond = tcg_invert_cond(cond);
+            fn = fns[cond];
+            assert(fn != NULL);
+            inv = true;
+        }
+        tcg_gen_gvec_2i_ool(dofs, aofs, c, oprsz, maxsz, inv, fn[vece]);
+        return;
+    }
+
+    if (oprsz < maxsz) {
+        expand_clr(dofs + oprsz, maxsz - oprsz);
+    }
+}
+
+void tcg_gen_gvec_cmpi(TCGCond cond, unsigned vece, uint32_t dofs,
+                       uint32_t aofs, int64_t c,
+                       uint32_t oprsz, uint32_t maxsz)
+{
+    TCGv_i64 tmp = tcg_constant_i64(c);
+    tcg_gen_gvec_cmps(cond, vece, dofs, aofs, tmp, oprsz, maxsz);
+}
+
 static void tcg_gen_bitsel_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c)
 {
     TCGv_i64 t = tcg_temp_ebb_new_i64();
233
+static const struct TCGCPUOps ppc_tcg_ops = {
234
.initialize = ppc_translate_init,
235
.cpu_exec_interrupt = ppc_cpu_exec_interrupt,
236
.tlb_fill = ppc_cpu_tlb_fill,
237
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
238
index XXXXXXX..XXXXXXX 100644
239
--- a/target/riscv/cpu.c
240
+++ b/target/riscv/cpu.c
241
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps riscv_sysemu_ops = {
242
243
#include "hw/core/tcg-cpu-ops.h"
244
245
-static struct TCGCPUOps riscv_tcg_ops = {
246
+static const struct TCGCPUOps riscv_tcg_ops = {
247
.initialize = riscv_translate_init,
248
.synchronize_from_tb = riscv_cpu_synchronize_from_tb,
249
.cpu_exec_interrupt = riscv_cpu_exec_interrupt,
250
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
251
index XXXXXXX..XXXXXXX 100644
252
--- a/target/rx/cpu.c
253
+++ b/target/rx/cpu.c
254
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps rx_sysemu_ops = {
255
256
#include "hw/core/tcg-cpu-ops.h"
257
258
-static struct TCGCPUOps rx_tcg_ops = {
259
+static const struct TCGCPUOps rx_tcg_ops = {
260
.initialize = rx_translate_init,
261
.synchronize_from_tb = rx_cpu_synchronize_from_tb,
262
.cpu_exec_interrupt = rx_cpu_exec_interrupt,
263
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
264
index XXXXXXX..XXXXXXX 100644
265
--- a/target/s390x/cpu.c
266
+++ b/target/s390x/cpu.c
267
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps s390_sysemu_ops = {
268
#ifdef CONFIG_TCG
269
#include "hw/core/tcg-cpu-ops.h"
270
271
-static struct TCGCPUOps s390_tcg_ops = {
272
+static const struct TCGCPUOps s390_tcg_ops = {
273
.initialize = s390x_translate_init,
274
.tlb_fill = s390_cpu_tlb_fill,
275
276
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
277
index XXXXXXX..XXXXXXX 100644
278
--- a/target/sh4/cpu.c
279
+++ b/target/sh4/cpu.c
280
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sh4_sysemu_ops = {
281
282
#include "hw/core/tcg-cpu-ops.h"
283
284
-static struct TCGCPUOps superh_tcg_ops = {
285
+static const struct TCGCPUOps superh_tcg_ops = {
286
.initialize = sh4_translate_init,
287
.synchronize_from_tb = superh_cpu_synchronize_from_tb,
288
.cpu_exec_interrupt = superh_cpu_exec_interrupt,
289
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
290
index XXXXXXX..XXXXXXX 100644
291
--- a/target/sparc/cpu.c
292
+++ b/target/sparc/cpu.c
293
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps sparc_sysemu_ops = {
294
#ifdef CONFIG_TCG
295
#include "hw/core/tcg-cpu-ops.h"
296
297
-static struct TCGCPUOps sparc_tcg_ops = {
298
+static const struct TCGCPUOps sparc_tcg_ops = {
299
.initialize = sparc_tcg_init,
300
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
301
.cpu_exec_interrupt = sparc_cpu_exec_interrupt,
302
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
303
index XXXXXXX..XXXXXXX 100644
304
--- a/target/tricore/cpu.c
305
+++ b/target/tricore/cpu.c
306
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps tricore_sysemu_ops = {
307
308
#include "hw/core/tcg-cpu-ops.h"
309
310
-static struct TCGCPUOps tricore_tcg_ops = {
311
+static const struct TCGCPUOps tricore_tcg_ops = {
312
.initialize = tricore_tcg_init,
313
.synchronize_from_tb = tricore_cpu_synchronize_from_tb,
314
.tlb_fill = tricore_cpu_tlb_fill,
315
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
316
index XXXXXXX..XXXXXXX 100644
317
--- a/target/xtensa/cpu.c
318
+++ b/target/xtensa/cpu.c
319
@@ -XXX,XX +XXX,XX @@ static const struct SysemuCPUOps xtensa_sysemu_ops = {
320
321
#include "hw/core/tcg-cpu-ops.h"
322
323
-static struct TCGCPUOps xtensa_tcg_ops = {
324
+static const struct TCGCPUOps xtensa_tcg_ops = {
325
.initialize = xtensa_translate_init,
326
.cpu_exec_interrupt = xtensa_cpu_exec_interrupt,
327
.tlb_fill = xtensa_cpu_tlb_fill,
328
--
260
--
329
2.25.1
261
2.34.1
330
331
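As a usage sketch of the new entry point (not part of the series; the
wrapper name, offsets and sizes below are invented for illustration), a
target front end can expand a compare-against-immediate over a vector
register like so:

    /*
     * Hypothetical example: compare each 32-bit element of a 16-byte
     * vector in CPUArchState at offset aofs against the immediate 42,
     * storing all-ones per true element and zero per false element
     * at offset dofs.  Both oprsz and maxsz are 16 bytes here.
     */
    static void gen_example_ceq_imm(uint32_t dofs, uint32_t aofs)
    {
        tcg_gen_gvec_cmpi(TCG_COND_EQ, MO_32, dofs, aofs, 42, 16, 16);
    }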
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

Migration is specific to system emulation.

Restrict current DeviceClass::vmsd to sysemu using #ifdef'ry,
and assert in cpu_exec_realizefn() that dc->vmsd not set under
user emulation.

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210517105140.1062037-12-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 cpu.c               | 2 ++
 target/sh4/cpu.c    | 5 +++--
 target/xtensa/cpu.c | 4 +++-
 3 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/cpu.c b/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/cpu.c
+++ b/cpu.c
@@ -XXX,XX +XXX,XX @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp)
 #endif /* CONFIG_TCG */
 
 #ifdef CONFIG_USER_ONLY
+    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
+           qdev_get_vmsd(DEVICE(cpu))->unmigratable);
     assert(cc->vmsd == NULL);
 #else
     if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_initfn(Object *obj)
     env->movcal_backup_tail = &(env->movcal_backup);
 }
 
+#ifndef CONFIG_USER_ONLY
 static const VMStateDescription vmstate_sh_cpu = {
     .name = "cpu",
     .unmigratable = 1,
 };
+#endif
 
 #include "hw/core/tcg-cpu-ops.h"
 
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_write_register = superh_cpu_gdb_write_register;
 #ifndef CONFIG_USER_ONLY
     cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
+    dc->vmsd = &vmstate_sh_cpu;
 #endif
     cc->disas_set_info = superh_cpu_disas_set_info;
 
     cc->gdb_num_core_regs = 59;
-
-    dc->vmsd = &vmstate_sh_cpu;
     cc->tcg_ops = &superh_tcg_ops;
 }
 
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_initfn(Object *obj)
 #endif
 }
 
+#ifndef CONFIG_USER_ONLY
 static const VMStateDescription vmstate_xtensa_cpu = {
     .name = "cpu",
     .unmigratable = 1,
 };
+#endif
 
 #include "hw/core/tcg-cpu-ops.h"
 
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_stop_before_watchpoint = true;
 #ifndef CONFIG_USER_ONLY
     cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
+    dc->vmsd = &vmstate_xtensa_cpu;
 #endif
     cc->disas_set_info = xtensa_cpu_disas_set_info;
-    dc->vmsd = &vmstate_xtensa_cpu;
     cc->tcg_ops = &xtensa_tcg_ops;
 }
 
--
2.25.1

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Song Gao <gaosong@loongson.cn>
Message-Id: <20230831030904.1194667-3-richard.henderson@linaro.org>
---
 target/arm/tcg/translate.c | 56 ++++++--------------------------------
 1 file changed, 9 insertions(+), 47 deletions(-)

diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -XXX,XX +XXX,XX @@ void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
     gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
 }
 
-#define GEN_CMP0(NAME, COND)                                            \
-    static void gen_##NAME##0_i32(TCGv_i32 d, TCGv_i32 a)              \
-    {                                                                   \
-        tcg_gen_negsetcond_i32(COND, d, a, tcg_constant_i32(0));       \
-    }                                                                   \
-    static void gen_##NAME##0_i64(TCGv_i64 d, TCGv_i64 a)              \
-    {                                                                   \
-        tcg_gen_negsetcond_i64(COND, d, a, tcg_constant_i64(0));       \
-    }                                                                   \
-    static void gen_##NAME##0_vec(unsigned vece, TCGv_vec d, TCGv_vec a) \
-    {                                                                   \
-        TCGv_vec zero = tcg_constant_vec_matching(d, vece, 0);          \
-        tcg_gen_cmp_vec(COND, vece, d, a, zero);                        \
-    }                                                                   \
-    void gen_gvec_##NAME##0(unsigned vece, uint32_t d, uint32_t m,      \
-                            uint32_t opr_sz, uint32_t max_sz)           \
-    {                                                                   \
-        const GVecGen2 op[4] = {                                        \
-            { .fno = gen_helper_gvec_##NAME##0_b,                       \
-              .fniv = gen_##NAME##0_vec,                                \
-              .opt_opc = vecop_list_cmp,                                \
-              .vece = MO_8 },                                           \
-            { .fno = gen_helper_gvec_##NAME##0_h,                       \
-              .fniv = gen_##NAME##0_vec,                                \
-              .opt_opc = vecop_list_cmp,                                \
-              .vece = MO_16 },                                          \
-            { .fni4 = gen_##NAME##0_i32,                                \
-              .fniv = gen_##NAME##0_vec,                                \
-              .opt_opc = vecop_list_cmp,                                \
-              .vece = MO_32 },                                          \
-            { .fni8 = gen_##NAME##0_i64,                                \
-              .fniv = gen_##NAME##0_vec,                                \
-              .opt_opc = vecop_list_cmp,                                \
-              .prefer_i64 = TCG_TARGET_REG_BITS == 64,                  \
-              .vece = MO_64 },                                          \
-        };                                                              \
-        tcg_gen_gvec_2(d, m, opr_sz, max_sz, &op[vece]);                \
-    }
+#define GEN_CMP0(NAME, COND)                              \
+    void NAME(unsigned vece, uint32_t d, uint32_t m,      \
+              uint32_t opr_sz, uint32_t max_sz)           \
+    { tcg_gen_gvec_cmpi(COND, vece, d, m, 0, opr_sz, max_sz); }
 
-static const TCGOpcode vecop_list_cmp[] = {
-    INDEX_op_cmp_vec, 0
-};
-
-GEN_CMP0(ceq, TCG_COND_EQ)
-GEN_CMP0(cle, TCG_COND_LE)
-GEN_CMP0(cge, TCG_COND_GE)
-GEN_CMP0(clt, TCG_COND_LT)
-GEN_CMP0(cgt, TCG_COND_GT)
+GEN_CMP0(gen_gvec_ceq0, TCG_COND_EQ)
+GEN_CMP0(gen_gvec_cle0, TCG_COND_LE)
+GEN_CMP0(gen_gvec_cge0, TCG_COND_GE)
+GEN_CMP0(gen_gvec_clt0, TCG_COND_LT)
+GEN_CMP0(gen_gvec_cgt0, TCG_COND_GT)
 
 #undef GEN_CMP0
 
--
2.34.1
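For reference, this is what one instance of the new macro expands to
after preprocessing (a mechanical expansion of the GEN_CMP0 definition
above, shown here for clarity):

    void gen_gvec_ceq0(unsigned vece, uint32_t d, uint32_t m,
                       uint32_t opr_sz, uint32_t max_sz)
    {
        /* All per-element-size dispatch now happens inside gvec. */
        tcg_gen_gvec_cmpi(TCG_COND_EQ, vece, d, m, 0, opr_sz, max_sz);
    }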
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210517105140.1062037-21-f4bug@amsat.org>
[rth: Drop declaration movement from target/*/cpu.h]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h            |  8 --------
 include/hw/core/sysemu-cpu-ops.h | 13 +++++++++++++
 hw/core/cpu-sysemu.c             |  6 +++---
 target/alpha/cpu.c               |  2 +-
 target/arm/cpu.c                 |  2 +-
 target/avr/cpu.c                 |  2 +-
 target/cris/cpu.c                |  2 +-
 target/hppa/cpu.c                |  2 +-
 target/i386/cpu.c                |  2 +-
 target/m68k/cpu.c                |  2 +-
 target/microblaze/cpu.c          |  2 +-
 target/mips/cpu.c                |  2 +-
 target/nios2/cpu.c               |  2 +-
 target/openrisc/cpu.c            |  2 +-
 target/ppc/cpu_init.c            |  2 +-
 target/riscv/cpu.c               |  2 +-
 target/rx/cpu.c                  |  2 +-
 target/s390x/cpu.c               |  2 +-
 target/sh4/cpu.c                 |  2 +-
 target/sparc/cpu.c               |  2 +-
 target/tricore/cpu.c             |  2 +-
 target/xtensa/cpu.c              |  2 +-
 22 files changed, 35 insertions(+), 30 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ struct SysemuCPUOps;
  *       If the target behaviour here is anything other than "set
  *       the PC register to the value passed in" then the target must
  *       also implement the synchronize_from_tb hook.
- * @get_phys_page_debug: Callback for obtaining a physical address.
- * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
- *       associated memory transaction attributes to use for the access.
- *       CPUs which use memory transaction attributes should implement this
- *       instead of get_phys_page_debug.
  * @gdb_read_register: Callback for letting GDB read a register.
  * @gdb_write_register: Callback for letting GDB write a register.
  * @gdb_num_core_regs: Number of core registers accessible to GDB.
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
     void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
                                Error **errp);
     void (*set_pc)(CPUState *cpu, vaddr value);
-    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
-    hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
-                                        MemTxAttrs *attrs);
     int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
     int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
 
diff --git a/include/hw/core/sysemu-cpu-ops.h b/include/hw/core/sysemu-cpu-ops.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/sysemu-cpu-ops.h
+++ b/include/hw/core/sysemu-cpu-ops.h
@@ -XXX,XX +XXX,XX @@
  * struct SysemuCPUOps: System operations specific to a CPU class
  */
 typedef struct SysemuCPUOps {
+    /**
+     * @get_phys_page_debug: Callback for obtaining a physical address.
+     */
+    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
+    /**
+     * @get_phys_page_attrs_debug: Callback for obtaining a physical address
+     *       and the associated memory transaction attributes to use for the
+     *       access.
+     *       CPUs which use memory transaction attributes should implement this
+     *       instead of get_phys_page_debug.
+     */
+    hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
+                                        MemTxAttrs *attrs);
     /**
      * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
      *       a memory access with the specified memory transaction attributes.
diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu-sysemu.c
+++ b/hw/core/cpu-sysemu.c
@@ -XXX,XX +XXX,XX @@ hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
 
-    if (cc->get_phys_page_attrs_debug) {
-        return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
+    if (cc->sysemu_ops->get_phys_page_attrs_debug) {
+        return cc->sysemu_ops->get_phys_page_attrs_debug(cpu, addr, attrs);
     }
     /* Fallback for CPUs which don't implement the _attrs_ hook */
     *attrs = MEMTXATTRS_UNSPECIFIED;
-    return cc->get_phys_page_debug(cpu, addr);
+    return cc->sysemu_ops->get_phys_page_debug(cpu, addr);
 }
 
 hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/alpha/cpu.c
+++ b/target/alpha/cpu.c
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_initfn(Object *obj)
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps alpha_sysemu_ops = {
+    .get_phys_page_debug = alpha_cpu_get_phys_page_debug,
 };
 #endif
 
@@ -XXX,XX +XXX,XX @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_read_register = alpha_cpu_gdb_read_register;
     cc->gdb_write_register = alpha_cpu_gdb_write_register;
 #ifndef CONFIG_USER_ONLY
-    cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug;
     dc->vmsd = &vmstate_alpha_cpu;
     cc->sysemu_ops = &alpha_sysemu_ops;
 #endif
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static gchar *arm_gdb_arch_name(CPUState *cs)
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps arm_sysemu_ops = {
+    .get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug,
     .asidx_from_attrs = arm_asidx_from_attrs,
     .write_elf32_note = arm_cpu_write_elf32_note,
     .write_elf64_note = arm_cpu_write_elf64_note,
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_read_register = arm_cpu_gdb_read_register;
     cc->gdb_write_register = arm_cpu_gdb_write_register;
 #ifndef CONFIG_USER_ONLY
-    cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
     cc->sysemu_ops = &arm_sysemu_ops;
 #endif
     cc->gdb_num_core_regs = 26;
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_dump_state(CPUState *cs, FILE *f, int flags)
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps avr_sysemu_ops = {
+    .get_phys_page_debug = avr_cpu_get_phys_page_debug,
 };
 
 #include "hw/core/tcg-cpu-ops.h"
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
     cc->dump_state = avr_cpu_dump_state;
     cc->set_pc = avr_cpu_set_pc;
     cc->memory_rw_debug = avr_cpu_memory_rw_debug;
-    cc->get_phys_page_debug = avr_cpu_get_phys_page_debug;
     dc->vmsd = &vms_avr_cpu;
     cc->sysemu_ops = &avr_sysemu_ops;
     cc->disas_set_info = avr_cpu_disas_set_info;
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/cris/cpu.c
+++ b/target/cris/cpu.c
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_initfn(Object *obj)
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps cris_sysemu_ops = {
+    .get_phys_page_debug = cris_cpu_get_phys_page_debug,
 };
 #endif
 
@@ -XXX,XX +XXX,XX @@ static void cris_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_read_register = cris_cpu_gdb_read_register;
     cc->gdb_write_register = cris_cpu_gdb_write_register;
 #ifndef CONFIG_USER_ONLY
-    cc->get_phys_page_debug = cris_cpu_get_phys_page_debug;
     dc->vmsd = &vmstate_cris_cpu;
     cc->sysemu_ops = &cris_sysemu_ops;
 #endif
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -XXX,XX +XXX,XX @@ static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps hppa_sysemu_ops = {
+    .get_phys_page_debug = hppa_cpu_get_phys_page_debug,
 };
 #endif
 
@@ -XXX,XX +XXX,XX @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_read_register = hppa_cpu_gdb_read_register;
     cc->gdb_write_register = hppa_cpu_gdb_write_register;
 #ifndef CONFIG_USER_ONLY
-    cc->get_phys_page_debug = hppa_cpu_get_phys_page_debug;
     dc->vmsd = &vmstate_hppa_cpu;
     cc->sysemu_ops = &hppa_sysemu_ops;
 #endif
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property x86_cpu_properties[] = {
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps i386_sysemu_ops = {
+    .get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug,
     .asidx_from_attrs = x86_asidx_from_attrs,
     .get_crash_info = x86_cpu_get_crash_info,
     .write_elf32_note = x86_cpu_write_elf32_note,
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
 
 #ifndef CONFIG_USER_ONLY
     cc->get_memory_mapping = x86_cpu_get_memory_mapping;
-    cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
     cc->sysemu_ops = &i386_sysemu_ops;
 #endif /* !CONFIG_USER_ONLY */
 
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/m68k/cpu.c
+++ b/target/m68k/cpu.c
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_m68k_cpu = {
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps m68k_sysemu_ops = {
+    .get_phys_page_debug = m68k_cpu_get_phys_page_debug,
 };
 #endif
 
@@ -XXX,XX +XXX,XX @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
     cc->gdb_read_register = m68k_cpu_gdb_read_register;
     cc->gdb_write_register = m68k_cpu_gdb_write_register;
 #if defined(CONFIG_SOFTMMU)
-    cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
     dc->vmsd = &vmstate_m68k_cpu;
     cc->sysemu_ops = &m68k_sysemu_ops;
 #endif
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -XXX,XX +XXX,XX @@ static ObjectClass *mb_cpu_class_by_name(const char *cpu_model)
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps mb_sysemu_ops = {
+    .get_phys_page_attrs_debug = mb_cpu_get_phys_page_attrs_debug,
 };
 #endif
 
@@ -XXX,XX +XXX,XX @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_write_register = mb_cpu_gdb_write_register;
 
 #ifndef CONFIG_USER_ONLY
-    cc->get_phys_page_attrs_debug = mb_cpu_get_phys_page_attrs_debug;
     dc->vmsd = &vmstate_mb_cpu;
     cc->sysemu_ops = &mb_sysemu_ops;
 #endif
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property mips_cpu_properties[] = {
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps mips_sysemu_ops = {
+    .get_phys_page_debug = mips_cpu_get_phys_page_debug,
     .legacy_vmsd = &vmstate_mips_cpu,
 };
 #endif
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
     cc->gdb_read_register = mips_cpu_gdb_read_register;
     cc->gdb_write_register = mips_cpu_gdb_write_register;
 #ifndef CONFIG_USER_ONLY
-    cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
     cc->sysemu_ops = &mips_sysemu_ops;
 #endif
     cc->disas_set_info = mips_cpu_disas_set_info;
diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/nios2/cpu.c
+++ b/target/nios2/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property nios2_properties[] = {
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps nios2_sysemu_ops = {
+    .get_phys_page_debug = nios2_cpu_get_phys_page_debug,
 };
 #endif
 
@@ -XXX,XX +XXX,XX @@ static void nios2_cpu_class_init(ObjectClass *oc, void *data)
     cc->set_pc = nios2_cpu_set_pc;
     cc->disas_set_info = nios2_cpu_disas_set_info;
 #ifndef CONFIG_USER_ONLY
-    cc->get_phys_page_debug = nios2_cpu_get_phys_page_debug;
     cc->sysemu_ops = &nios2_sysemu_ops;
 #endif
     cc->gdb_read_register = nios2_cpu_gdb_read_register;
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void openrisc_any_initfn(Object *obj)
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps openrisc_sysemu_ops = {
+    .get_phys_page_debug = openrisc_cpu_get_phys_page_debug,
 };
 #endif
 
@@ -XXX,XX +XXX,XX @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_read_register = openrisc_cpu_gdb_read_register;
     cc->gdb_write_register = openrisc_cpu_gdb_write_register;
 #ifndef CONFIG_USER_ONLY
-    cc->get_phys_page_debug = openrisc_cpu_get_phys_page_debug;
     dc->vmsd = &vmstate_openrisc_cpu;
     cc->sysemu_ops = &openrisc_sysemu_ops;
 #endif
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static Property ppc_cpu_properties[] = {
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps ppc_sysemu_ops = {
+    .get_phys_page_debug = ppc_cpu_get_phys_page_debug,
     .write_elf32_note = ppc32_cpu_write_elf32_note,
     .write_elf64_note = ppc64_cpu_write_elf64_note,
     .virtio_is_big_endian = ppc_cpu_is_big_endian,
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_read_register = ppc_cpu_gdb_read_register;
     cc->gdb_write_register = ppc_cpu_gdb_write_register;
 #ifndef CONFIG_USER_ONLY
-    cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug;
     cc->sysemu_ops = &ppc_sysemu_ops;
 #endif
 
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps riscv_sysemu_ops = {
+    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
     .write_elf64_note = riscv_cpu_write_elf64_note,
     .write_elf32_note = riscv_cpu_write_elf32_note,
     .legacy_vmsd = &vmstate_riscv_cpu,
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
     cc->gdb_stop_before_watchpoint = true;
     cc->disas_set_info = riscv_cpu_disas_set_info;
 #ifndef CONFIG_USER_ONLY
-    cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
     cc->sysemu_ops = &riscv_sysemu_ops;
 #endif
     cc->gdb_arch_name = riscv_gdb_arch_name;
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_init(Object *obj)
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps rx_sysemu_ops = {
+    .get_phys_page_debug = rx_cpu_get_phys_page_debug,
 };
 #endif
 
@@ -XXX,XX +XXX,XX @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)
 #endif
     cc->gdb_read_register = rx_cpu_gdb_read_register;
     cc->gdb_write_register = rx_cpu_gdb_write_register;
-    cc->get_phys_page_debug = rx_cpu_get_phys_page_debug;
     cc->disas_set_info = rx_cpu_disas_set_info;
 
     cc->gdb_num_core_regs = 26;
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_reset_full(DeviceState *dev)
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps s390_sysemu_ops = {
+    .get_phys_page_debug = s390_cpu_get_phys_page_debug,
     .get_crash_info = s390_cpu_get_crash_info,
     .write_elf64_note = s390_cpu_write_elf64_note,
     .legacy_vmsd = &vmstate_s390_cpu,
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_read_register = s390_cpu_gdb_read_register;
     cc->gdb_write_register = s390_cpu_gdb_write_register;
 #ifndef CONFIG_USER_ONLY
-    cc->get_phys_page_debug = s390_cpu_get_phys_page_debug;
     cc->sysemu_ops = &s390_sysemu_ops;
 #endif
     cc->disas_set_info = s390_cpu_disas_set_info;
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sh4/cpu.c
+++ b/target/sh4/cpu.c
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_sh_cpu = {
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps sh4_sysemu_ops = {
+    .get_phys_page_debug = superh_cpu_get_phys_page_debug,
 };
 #endif
 
@@ -XXX,XX +XXX,XX @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_read_register = superh_cpu_gdb_read_register;
     cc->gdb_write_register = superh_cpu_gdb_write_register;
 #ifndef CONFIG_USER_ONLY
-    cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
     cc->sysemu_ops = &sh4_sysemu_ops;
     dc->vmsd = &vmstate_sh_cpu;
 #endif
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property sparc_cpu_properties[] = {
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps sparc_sysemu_ops = {
+    .get_phys_page_debug = sparc_cpu_get_phys_page_debug,
     .legacy_vmsd = &vmstate_sparc_cpu,
 };
 #endif
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_read_register = sparc_cpu_gdb_read_register;
     cc->gdb_write_register = sparc_cpu_gdb_write_register;
 #ifndef CONFIG_USER_ONLY
-    cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
     cc->sysemu_ops = &sparc_sysemu_ops;
 #endif
     cc->disas_set_info = cpu_sparc_disas_set_info;
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/tricore/cpu.c
+++ b/target/tricore/cpu.c
@@ -XXX,XX +XXX,XX @@ static void tc27x_initfn(Object *obj)
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps tricore_sysemu_ops = {
+    .get_phys_page_debug = tricore_cpu_get_phys_page_debug,
 };
 
 #include "hw/core/tcg-cpu-ops.h"
@@ -XXX,XX +XXX,XX @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)
 
     cc->dump_state = tricore_cpu_dump_state;
     cc->set_pc = tricore_cpu_set_pc;
-    cc->get_phys_page_debug = tricore_cpu_get_phys_page_debug;
     cc->sysemu_ops = &tricore_sysemu_ops;
     cc->tcg_ops = &tricore_tcg_ops;
 }
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/xtensa/cpu.c
+++ b/target/xtensa/cpu.c
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_xtensa_cpu = {
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps xtensa_sysemu_ops = {
+    .get_phys_page_debug = xtensa_cpu_get_phys_page_debug,
 };
 #endif
 
@@ -XXX,XX +XXX,XX @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_stop_before_watchpoint = true;
 #ifndef CONFIG_USER_ONLY
     cc->sysemu_ops = &xtensa_sysemu_ops;
-    cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
     dc->vmsd = &vmstate_xtensa_cpu;
 #endif
     cc->disas_set_info = xtensa_cpu_disas_set_info;
--
2.25.1

Now that we defer address space update and tlb_flush until
the next async_run_on_cpu, the plugin run at the end of the
instruction no longer has to contend with a flushed tlb.
Therefore, delete SavedIOTLB entirely.

Properly return false from tlb_plugin_lookup when we do
not have a tlb match.

Fixes a bug in which SavedIOTLB had stale data, because
there were multiple i/o accesses within a single insn.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h   | 13 -------
 include/qemu/typedefs.h |  1 -
 accel/tcg/cputlb.c      | 79 ++++++++++++-----------------------------
 3 files changed, 23 insertions(+), 70 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ struct CPUWatchpoint {
     QTAILQ_ENTRY(CPUWatchpoint) entry;
 };
 
-#ifdef CONFIG_PLUGIN
-/*
- * For plugins we sometime need to save the resolved iotlb data before
- * the memory regions get moved around by io_writex.
- */
-typedef struct SavedIOTLB {
-    MemoryRegionSection *section;
-    hwaddr mr_offset;
-} SavedIOTLB;
-#endif
-
 struct KVMState;
 struct kvm_run;
 
@@ -XXX,XX +XXX,XX @@ struct CPUState {
 
 #ifdef CONFIG_PLUGIN
     GArray *plugin_mem_cbs;
-    /* saved iotlb data from io_writex */
-    SavedIOTLB saved_iotlb;
 #endif
 
     /* TODO Move common fields from CPUArchState here. */
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -XXX,XX +XXX,XX @@ typedef struct QString QString;
 typedef struct RAMBlock RAMBlock;
 typedef struct Range Range;
 typedef struct ReservedRegion ReservedRegion;
-typedef struct SavedIOTLB SavedIOTLB;
 typedef struct SHPCDevice SHPCDevice;
 typedef struct SSIBus SSIBus;
 typedef struct TCGHelperInfo TCGHelperInfo;
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
     }
 }
 
-/*
- * Save a potentially trashed CPUTLBEntryFull for later lookup by plugin.
- * This is read by tlb_plugin_lookup if the fulltlb entry doesn't match
- * because of the side effect of io_writex changing memory layout.
- */
-static void save_iotlb_data(CPUState *cs, MemoryRegionSection *section,
-                            hwaddr mr_offset)
-{
-#ifdef CONFIG_PLUGIN
-    SavedIOTLB *saved = &cs->saved_iotlb;
-    saved->section = section;
-    saved->mr_offset = mr_offset;
-#endif
-}
-
 static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
                          int mmu_idx, vaddr addr, uintptr_t retaddr,
                          MMUAccessType access_type, MemOp op)
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
         cpu_io_recompile(cpu, retaddr);
     }
 
-    /*
-     * The memory_region_dispatch may trigger a flush/resize
-     * so for plugins we save the iotlb_data just in case.
-     */
-    save_iotlb_data(cpu, section, mr_offset);
-
     {
         QEMU_IOTHREAD_LOCK_GUARD();
         r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
     }
     cpu->mem_io_pc = retaddr;
 
-    /*
-     * The memory_region_dispatch may trigger a flush/resize
-     * so for plugins we save the iotlb_data just in case.
-     */
-    save_iotlb_data(cpu, section, mr_offset);
-
     {
         QEMU_IOTHREAD_LOCK_GUARD();
         r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
@@ -XXX,XX +XXX,XX @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
  * in the softmmu lookup code (or helper). We don't handle re-fills or
  * checking the victim table. This is purely informational.
  *
- * This almost never fails as the memory access being instrumented
- * should have just filled the TLB. The one corner case is io_writex
- * which can cause TLB flushes and potential resizing of the TLBs
- * losing the information we need. In those cases we need to recover
- * data from a copy of the CPUTLBEntryFull. As long as this always occurs
- * from the same thread (which a mem callback will be) this is safe.
+ * The one corner case is i/o write, which can cause changes to the
+ * address space.  Those changes, and the corresponding tlb flush,
+ * should be delayed until the next TB, so even then this ought not fail.
+ * But check, Just in Case.
  */
-
 bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
                        bool is_store, struct qemu_plugin_hwaddr *data)
 {
     CPUArchState *env = cpu->env_ptr;
     CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
     uintptr_t index = tlb_index(env, mmu_idx, addr);
-    uint64_t tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
+    MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
+    uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
 
-    if (likely(tlb_hit(tlb_addr, addr))) {
-        /* We must have an iotlb entry for MMIO */
-        if (tlb_addr & TLB_MMIO) {
-            CPUTLBEntryFull *full;
-            full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
-            data->is_io = true;
-            data->v.io.section =
-                iotlb_to_section(cpu, full->xlat_section, full->attrs);
-            data->v.io.offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
-        } else {
-            data->is_io = false;
-            data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
-        }
-        return true;
-    } else {
-        SavedIOTLB *saved = &cpu->saved_iotlb;
-        data->is_io = true;
-        data->v.io.section = saved->section;
-        data->v.io.offset = saved->mr_offset;
-        return true;
+    if (unlikely(!tlb_hit(tlb_addr, addr))) {
+        return false;
     }
-}
 
+    /* We must have an iotlb entry for MMIO */
+    if (tlb_addr & TLB_MMIO) {
+        CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+        hwaddr xlat = full->xlat_section;
+
+        data->is_io = true;
+        data->v.io.offset = (xlat & TARGET_PAGE_MASK) + addr;
+        data->v.io.section =
+            iotlb_to_section(cpu, xlat & ~TARGET_PAGE_MASK, full->attrs);
+    } else {
+        data->is_io = false;
+        data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
+    }
+    return true;
+}
 #endif
 
 /*
--
2.34.1
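After the SysemuCPUOps conversion above, the per-target wiring is
uniform.  A sketch for a hypothetical target "foo" (all names here are
invented for illustration, not from any patch in this series):

    #include "hw/core/sysemu-cpu-ops.h"

    /* System-emulation hooks live in one const table per target. */
    static const struct SysemuCPUOps foo_sysemu_ops = {
        .get_phys_page_debug = foo_cpu_get_phys_page_debug,
    };

    static void foo_cpu_class_init(ObjectClass *oc, void *data)
    {
        CPUClass *cc = CPU_CLASS(oc);

    #ifndef CONFIG_USER_ONLY
        /* Replaces the old per-hook cc->get_phys_page_debug assignment. */
        cc->sysemu_ops = &foo_sysemu_ops;
    #endif
    }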
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210517105140.1062037-22-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/hw/core/cpu.h            | 3 ---
 include/hw/core/sysemu-cpu-ops.h | 5 +++++
 hw/core/cpu-sysemu.c             | 4 ++--
 target/i386/cpu.c                | 2 +-
 4 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ struct SysemuCPUOps;
  * @dump_statistics: Callback for dumping statistics.
  * @get_arch_id: Callback for getting architecture-dependent CPU ID.
  * @get_paging_enabled: Callback for inquiring whether paging is enabled.
- * @get_memory_mapping: Callback for obtaining the memory mappings.
  * @set_pc: Callback for setting the Program Counter register. This
  *       should have the semantics used by the target architecture when
  *       setting the PC from a source such as an ELF file entry point;
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
     void (*dump_statistics)(CPUState *cpu, int flags);
     int64_t (*get_arch_id)(CPUState *cpu);
     bool (*get_paging_enabled)(const CPUState *cpu);
-    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
-                               Error **errp);
     void (*set_pc)(CPUState *cpu, vaddr value);
     int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
     int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
diff --git a/include/hw/core/sysemu-cpu-ops.h b/include/hw/core/sysemu-cpu-ops.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/sysemu-cpu-ops.h
+++ b/include/hw/core/sysemu-cpu-ops.h
@@ -XXX,XX +XXX,XX @@
  * struct SysemuCPUOps: System operations specific to a CPU class
  */
 typedef struct SysemuCPUOps {
+    /**
+     * @get_memory_mapping: Callback for obtaining the memory mappings.
+     */
+    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
+                               Error **errp);
     /**
      * @get_phys_page_debug: Callback for obtaining a physical address.
      */
diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu-sysemu.c
+++ b/hw/core/cpu-sysemu.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
 
-    if (cc->get_memory_mapping) {
-        cc->get_memory_mapping(cpu, list, errp);
+    if (cc->sysemu_ops->get_memory_mapping) {
+        cc->sysemu_ops->get_memory_mapping(cpu, list, errp);
         return;
     }
 
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property x86_cpu_properties[] = {
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps i386_sysemu_ops = {
+    .get_memory_mapping = x86_cpu_get_memory_mapping,
     .get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug,
     .asidx_from_attrs = x86_asidx_from_attrs,
     .get_crash_info = x86_cpu_get_crash_info,
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
     cc->get_paging_enabled = x86_cpu_get_paging_enabled;
 
 #ifndef CONFIG_USER_ONLY
-    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
     cc->sysemu_ops = &i386_sysemu_ops;
 #endif /* !CONFIG_USER_ONLY */
 
--
2.25.1

These are common code from io_readx and io_writex.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 77 +++++++++++++++++++++++++++-------------------
 1 file changed, 45 insertions(+), 32 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
      * (non-page-aligned) vaddr of the eventual memory access to get
      * the MemoryRegion offset for the access. Note that the vaddr we
      * subtract here is that of the page base, and not the same as the
-     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
+     * vaddr we add back in io_prepare()/get_page_addr_code().
      */
     desc->fulltlb[index] = *full;
     full = &desc->fulltlb[index];
@@ -XXX,XX +XXX,XX @@ static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
     }
 }
 
-static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
-                         int mmu_idx, vaddr addr, uintptr_t retaddr,
-                         MMUAccessType access_type, MemOp op)
+static MemoryRegionSection *
+io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
+           MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
 {
     CPUState *cpu = env_cpu(env);
-    hwaddr mr_offset;
     MemoryRegionSection *section;
-    MemoryRegion *mr;
-    uint64_t val;
-    MemTxResult r;
+    hwaddr mr_offset;
 
-    section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
-    mr = section->mr;
-    mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
+    section = iotlb_to_section(cpu, xlat, attrs);
+    mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
     cpu->mem_io_pc = retaddr;
     if (!cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
 
+    *out_offset = mr_offset;
+    return section;
+}
+
+static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
+                      unsigned size, MMUAccessType access_type, int mmu_idx,
+                      MemTxResult response, uintptr_t retaddr,
+                      MemoryRegionSection *section, hwaddr mr_offset)
+{
+    hwaddr physaddr = (mr_offset +
+                       section->offset_within_address_space -
+                       section->offset_within_region);
+
+    cpu_transaction_failed(env_cpu(env), physaddr, addr, size, access_type,
+                           mmu_idx, full->attrs, response, retaddr);
+}
+
+static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
+                         int mmu_idx, vaddr addr, uintptr_t retaddr,
+                         MMUAccessType access_type, MemOp op)
+{
+    MemoryRegionSection *section;
+    hwaddr mr_offset;
+    MemoryRegion *mr;
+    MemTxResult r;
+    uint64_t val;
+
+    section = io_prepare(&mr_offset, env, full->xlat_section,
+                         full->attrs, addr, retaddr);
+    mr = section->mr;
+
     {
         QEMU_IOTHREAD_LOCK_GUARD();
         r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
     }
 
     if (r != MEMTX_OK) {
-        hwaddr physaddr = mr_offset +
-            section->offset_within_address_space -
-            section->offset_within_region;
-
-        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
-                               mmu_idx, full->attrs, r, retaddr);
+        io_failed(env, full, addr, memop_size(op), access_type, mmu_idx,
+                  r, retaddr, section, mr_offset);
     }
     return val;
 }
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
                       int mmu_idx, uint64_t val, vaddr addr,
                       uintptr_t retaddr, MemOp op)
 {
-    CPUState *cpu = env_cpu(env);
-    hwaddr mr_offset;
     MemoryRegionSection *section;
+    hwaddr mr_offset;
     MemoryRegion *mr;
     MemTxResult r;
 
-    section = iotlb_to_section(cpu, full->xlat_section, full->attrs);
+    section = io_prepare(&mr_offset, env, full->xlat_section,
+                         full->attrs, addr, retaddr);
     mr = section->mr;
-    mr_offset = (full->xlat_section & TARGET_PAGE_MASK) + addr;
-    if (!cpu->can_do_io) {
-        cpu_io_recompile(cpu, retaddr);
-    }
-    cpu->mem_io_pc = retaddr;
 
     {
         QEMU_IOTHREAD_LOCK_GUARD();
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
     }
 
     if (r != MEMTX_OK) {
-        hwaddr physaddr = mr_offset +
-            section->offset_within_address_space -
-            section->offset_within_region;
-
-        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
-                               MMU_DATA_STORE, mmu_idx, full->attrs, r,
-                               retaddr);
+        io_failed(env, full, addr, memop_size(op), MMU_DATA_STORE, mmu_idx,
+                  r, retaddr, section, mr_offset);
     }
 }
 
--
2.34.1
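Any future MMIO accessor follows the same shape as io_readx/io_writex
after this refactoring.  A sketch (hypothetical function, using only
the io_prepare/io_failed helpers introduced above, with their
signatures as of this patch):

    static uint64_t io_example_read(CPUArchState *env, CPUTLBEntryFull *full,
                                    int mmu_idx, vaddr addr, uintptr_t retaddr)
    {
        MemoryRegionSection *section;
        hwaddr mr_offset;
        MemTxResult r;
        uint64_t val;

        /* Resolve section/offset and record mem_io_pc, as io_readx does. */
        section = io_prepare(&mr_offset, env, full->xlat_section,
                             full->attrs, addr, retaddr);
        {
            QEMU_IOTHREAD_LOCK_GUARD();
            r = memory_region_dispatch_read(section->mr, mr_offset, &val,
                                            MO_64, full->attrs);
        }
        if (r != MEMTX_OK) {
            io_failed(env, full, addr, memop_size(MO_64), MMU_DATA_LOAD,
                      mmu_idx, r, retaddr, section, mr_offset);
        }
        return val;
    }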
1
From: Philippe Mathieu-Daudé <f4bug@amsat.org>
1
Since the introduction of CPUTLBEntryFull, we can recover
2
the full cpu address space physical address without having
3
to examine the MemoryRegionSection.
2
4
3
Quoting Peter Maydell [*]:
5
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
5
There are two ways to handle migration for
6
a CPU object:
7
8
(1) like any other device, so it has a dc->vmsd that covers
9
migration for the whole object. As usual for objects that are a
10
subclass of a parent that has state, the first entry in the
11
VMStateDescription field list is VMSTATE_CPU(), which migrates
12
the cpu_common fields, followed by whatever the CPU's own migration
13
fields are.
14
15
(2) a backwards-compatible mechanism for CPUs that were
16
originally migrated using manual "write fields to the migration
17
stream structures". The on-the-wire migration format
18
for those is based on the 'env' pointer (which isn't a QOM object),
19
and the cpu_common part of the migration data is elsewhere.
20
21
cpu_exec_realizefn() handles both possibilities:
22
23
* for type 1, dc->vmsd is set and cc->vmsd is not,
24
so cpu_exec_realizefn() does nothing, and the standard
25
"register dc->vmsd for a device" code does everything needed
26
27
* for type 2, dc->vmsd is NULL and so we register the
28
vmstate_cpu_common directly to handle the cpu-common fields,
29
and the cc->vmsd to handle the per-CPU stuff
30
31
You can't change a CPU from one type to the other without breaking
32
migration compatibility, which is why some guest architectures
33
are stuck on the cc->vmsd form. New targets should use dc->vmsd.
34
35
To avoid new targets to start using type (2), rename cc->vmsd as
36
cc->legacy_vmsd. The correct field to implement is dc->vmsd (the
37
DeviceClass one).
38
39
See also commit b170fce3dd0 ("cpu: Register VMStateDescription
40
through CPUState") for historic background.
41
42
[*] https://www.mail-archive.com/qemu-devel@nongnu.org/msg800849.html
43
44
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
45
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
46
Cc: Peter Maydell <peter.maydell@linaro.org>
47
Message-Id: <20210517105140.1062037-13-f4bug@amsat.org>
48
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
6
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
49
---
7
---
50
include/hw/core/cpu.h | 5 +++--
8
accel/tcg/cputlb.c | 12 ++++--------
51
cpu.c | 12 ++++++------
9
1 file changed, 4 insertions(+), 8 deletions(-)
52
target/arm/cpu.c | 2 +-
53
target/avr/cpu.c | 2 +-
54
target/i386/cpu.c | 2 +-
55
target/mips/cpu.c | 2 +-
56
target/ppc/cpu_init.c | 2 +-
57
target/riscv/cpu.c | 3 +--
58
target/s390x/cpu.c | 2 +-
59
target/sparc/cpu.c | 2 +-
60
10 files changed, 17 insertions(+), 17 deletions(-)
61
10
62
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
11
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
63
index XXXXXXX..XXXXXXX 100644
12
index XXXXXXX..XXXXXXX 100644
64
--- a/include/hw/core/cpu.h
13
--- a/accel/tcg/cputlb.c
65
+++ b/include/hw/core/cpu.h
14
+++ b/accel/tcg/cputlb.c
66
@@ -XXX,XX +XXX,XX @@ struct AccelCPUClass;
15
@@ -XXX,XX +XXX,XX @@ io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
67
* 32-bit VM coredump.
16
68
* @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
17
static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
69
* note to a 32-bit VM coredump.
18
unsigned size, MMUAccessType access_type, int mmu_idx,
70
- * @vmsd: State description for migration.
19
- MemTxResult response, uintptr_t retaddr,
71
+ * @legacy_vmsd: Legacy state description for migration.
20
- MemoryRegionSection *section, hwaddr mr_offset)
72
+ * Do not use in new targets, use #DeviceClass::vmsd instead.
21
+ MemTxResult response, uintptr_t retaddr)
73
* @gdb_num_core_regs: Number of core registers accessible to GDB.
22
{
74
* @gdb_core_xml_file: File name for core registers GDB XML description.
23
- hwaddr physaddr = (mr_offset +
75
* @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
24
- section->offset_within_address_space -
76
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
25
- section->offset_within_region);
77
int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
26
-
78
void *opaque);
27
+ hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
79
28
cpu_transaction_failed(env_cpu(env), physaddr, addr, size, access_type,
80
- const VMStateDescription *vmsd;
29
mmu_idx, full->attrs, response, retaddr);
81
+ const VMStateDescription *legacy_vmsd;
30
}
82
const char *gdb_core_xml_file;
31
@@ -XXX,XX +XXX,XX @@ static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
83
gchar * (*gdb_arch_name)(CPUState *cpu);
32
84
const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
33
if (r != MEMTX_OK) {
85
diff --git a/cpu.c b/cpu.c
34
io_failed(env, full, addr, memop_size(op), access_type, mmu_idx,
86
index XXXXXXX..XXXXXXX 100644
35
- r, retaddr, section, mr_offset);
87
--- a/cpu.c
36
+ r, retaddr);
88
+++ b/cpu.c
89
@@ -XXX,XX +XXX,XX @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp)
90
#ifdef CONFIG_USER_ONLY
91
assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
92
qdev_get_vmsd(DEVICE(cpu))->unmigratable);
93
- assert(cc->vmsd == NULL);
94
+ assert(cc->legacy_vmsd == NULL);
95
#else
96
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
97
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
98
}
37
}
99
- if (cc->vmsd != NULL) {
38
return val;
100
- vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
+ if (cc->legacy_vmsd != NULL) {
+ vmstate_register(NULL, cpu->cpu_index, cc->legacy_vmsd, cpu);
}
#endif /* CONFIG_USER_ONLY */
}
@@ -XXX,XX +XXX,XX @@ void cpu_exec_unrealizefn(CPUState *cpu)
CPUClass *cc = CPU_GET_CLASS(cpu);

#ifdef CONFIG_USER_ONLY
- assert(cc->vmsd == NULL);
+ assert(cc->legacy_vmsd == NULL);
#else
- if (cc->vmsd != NULL) {
- vmstate_unregister(NULL, cc->vmsd, cpu);
+ if (cc->legacy_vmsd != NULL) {
+ vmstate_unregister(NULL, cc->legacy_vmsd, cpu);
}
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
cc->asidx_from_attrs = arm_asidx_from_attrs;
- cc->vmsd = &vmstate_arm_cpu;
+ cc->legacy_vmsd = &vmstate_arm_cpu;
cc->virtio_is_big_endian = arm_cpu_virtio_is_big_endian;
cc->write_elf64_note = arm_cpu_write_elf64_note;
cc->write_elf32_note = arm_cpu_write_elf32_note;
diff --git a/target/avr/cpu.c b/target/avr/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/avr/cpu.c
+++ b/target/avr/cpu.c
@@ -XXX,XX +XXX,XX @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
cc->set_pc = avr_cpu_set_pc;
cc->memory_rw_debug = avr_cpu_memory_rw_debug;
cc->get_phys_page_debug = avr_cpu_get_phys_page_debug;
- cc->vmsd = &vms_avr_cpu;
+ cc->legacy_vmsd = &vms_avr_cpu;
cc->disas_set_info = avr_cpu_disas_set_info;
cc->gdb_read_register = avr_cpu_gdb_read_register;
cc->gdb_write_register = avr_cpu_gdb_write_register;
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
cc->write_elf32_note = x86_cpu_write_elf32_note;
cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
- cc->vmsd = &vmstate_x86_cpu;
+ cc->legacy_vmsd = &vmstate_x86_cpu;
#endif /* !CONFIG_USER_ONLY */

cc->gdb_arch_name = x86_gdb_arch_name;
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->gdb_write_register = mips_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
- cc->vmsd = &vmstate_mips_cpu;
+ cc->legacy_vmsd = &vmstate_mips_cpu;
#endif
cc->disas_set_info = mips_cpu_disas_set_info;
cc->gdb_num_core_regs = 73;
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_write_register = ppc_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug;
- cc->vmsd = &vmstate_ppc_cpu;
+ cc->legacy_vmsd = &vmstate_ppc_cpu;
#endif
#if defined(CONFIG_SOFTMMU)
cc->write_elf64_note = ppc64_cpu_write_elf64_note;
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
- /* For now, mark unmigratable: */
- cc->vmsd = &vmstate_riscv_cpu;
+ cc->legacy_vmsd = &vmstate_riscv_cpu;
cc->write_elf64_note = riscv_cpu_write_elf64_note;
cc->write_elf32_note = riscv_cpu_write_elf32_note;
#endif
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_write_register = s390_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = s390_cpu_get_phys_page_debug;
- cc->vmsd = &vmstate_s390_cpu;
+ cc->legacy_vmsd = &vmstate_s390_cpu;
cc->get_crash_info = s390_cpu_get_crash_info;
cc->write_elf64_note = s390_cpu_write_elf64_note;
#endif
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_write_register = sparc_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
- cc->vmsd = &vmstate_sparc_cpu;
+ cc->legacy_vmsd = &vmstate_sparc_cpu;
#endif
cc->disas_set_info = cpu_sparc_disas_set_info;

--
2.25.1

}
@@ -XXX,XX +XXX,XX @@ static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,

if (r != MEMTX_OK) {
io_failed(env, full, addr, memop_size(op), MMU_DATA_STORE, mmu_idx,
- r, retaddr, section, mr_offset);
+ r, retaddr);
}
}

--
2.34.1
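For readers unfamiliar with the vmsd hook being renamed above: a CPU's migratable state is described by a VMStateDescription, which vmstate_register() attaches to the instance. The sketch below is illustrative only; the DemoCPUPart type and its fields are hypothetical, while the VMSTATE_* macros and the structure itself are the stock API from migration/vmstate.h.

    /* Minimal sketch of a VMStateDescription; DemoCPUPart is hypothetical. */
    #include "migration/vmstate.h"

    typedef struct DemoCPUPart {
        uint32_t ctrl;     /* some control register */
        uint64_t timer;    /* some timer count */
    } DemoCPUPart;

    static const VMStateDescription vmstate_demo_cpu_part = {
        .name = "demo-cpu-part",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            /* each field is saved/restored by name and type */
            VMSTATE_UINT32(ctrl, DemoCPUPart),
            VMSTATE_UINT64(timer, DemoCPUPart),
            VMSTATE_END_OF_LIST()
        }
    };

The legacy_vmsd path above hands exactly such a description to vmstate_register(NULL, cpu->cpu_index, ..., cpu); the rename marks it as the deprecated CPUClass route, as opposed to the DeviceClass::vmsd route checked via qdev_get_vmsd().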
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

Only 2 headers require "exec/tb-context.h". Instead of having
all files including "exec/exec-all.h" also including it, directly
include it where it is required:
- accel/tcg/cpu-exec.c
- accel/tcg/translate-all.c

For plugins/plugin.h, we were implicitly relying on
exec/exec-all.h -> exec/tb-context.h -> qemu/qht.h
which is now included directly.

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20210524170453.3791436-2-f4bug@amsat.org>
[rth: Fix plugins/plugin.h compilation]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/exec-all.h | 1 -
include/tcg/tcg.h | 1 -
plugins/plugin.h | 1 +
accel/tcg/cpu-exec.c | 1 +
accel/tcg/translate-all.c | 1 +
5 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -XXX,XX +XXX,XX @@
#define EXEC_ALL_H

#include "cpu.h"
-#include "exec/tb-context.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index XXXXXXX..XXXXXXX 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -XXX,XX +XXX,XX @@

#include "cpu.h"
#include "exec/memop.h"
-#include "exec/tb-context.h"
#include "qemu/bitops.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
diff --git a/plugins/plugin.h b/plugins/plugin.h
index XXXXXXX..XXXXXXX 100644
--- a/plugins/plugin.h
+++ b/plugins/plugin.h
@@ -XXX,XX +XXX,XX @@
#define _PLUGIN_INTERNAL_H_

#include <gmodule.h>
+#include "qemu/qht.h"

#define QEMU_PLUGIN_MIN_VERSION 0

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
+#include "exec/tb-context.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
+#include "exec/tb-context.h"
#include "exec/translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
--
2.25.1

Rather than saving MemoryRegionSection and offset,
save phys_addr and MemoryRegion. This matches up
much closer with the plugin api.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/qemu/plugin-memory.h | 11 ++---------
accel/tcg/cputlb.c | 16 +++++++++-------
plugins/api.c | 27 ++++++---------------------
3 files changed, 17 insertions(+), 37 deletions(-)

diff --git a/include/qemu/plugin-memory.h b/include/qemu/plugin-memory.h
index XXXXXXX..XXXXXXX 100644
--- a/include/qemu/plugin-memory.h
+++ b/include/qemu/plugin-memory.h
@@ -XXX,XX +XXX,XX @@
struct qemu_plugin_hwaddr {
bool is_io;
bool is_store;
- union {
- struct {
- MemoryRegionSection *section;
- hwaddr offset;
- } io;
- struct {
- void *hostaddr;
- } ram;
- } v;
+ hwaddr phys_addr;
+ MemoryRegion *mr;
};

/**
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
uintptr_t index = tlb_index(env, mmu_idx, addr);
MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
+ CPUTLBEntryFull *full;

if (unlikely(!tlb_hit(tlb_addr, addr))) {
return false;
}

+ full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
+
/* We must have an iotlb entry for MMIO */
if (tlb_addr & TLB_MMIO) {
- CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
- hwaddr xlat = full->xlat_section;
-
+ MemoryRegionSection *section =
+ iotlb_to_section(cpu, full->xlat_section & ~TARGET_PAGE_MASK,
+ full->attrs);
data->is_io = true;
- data->v.io.offset = (xlat & TARGET_PAGE_MASK) + addr;
- data->v.io.section =
- iotlb_to_section(cpu, xlat & ~TARGET_PAGE_MASK, full->attrs);
+ data->mr = section->mr;
} else {
data->is_io = false;
- data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
+ data->mr = NULL;
}
return true;
}
diff --git a/plugins/api.c b/plugins/api.c
index XXXXXXX..XXXXXXX 100644
--- a/plugins/api.c
+++ b/plugins/api.c
@@ -XXX,XX +XXX,XX @@ uint64_t qemu_plugin_hwaddr_phys_addr(const struct qemu_plugin_hwaddr *haddr)
{
#ifdef CONFIG_SOFTMMU
if (haddr) {
- if (!haddr->is_io) {
- RAMBlock *block;
- ram_addr_t offset;
- void *hostaddr = haddr->v.ram.hostaddr;
-
- block = qemu_ram_block_from_host(hostaddr, false, &offset);
- if (!block) {
- error_report("Bad host ram pointer %p", haddr->v.ram.hostaddr);
- abort();
- }
-
- return block->offset + offset + block->mr->addr;
- } else {
- MemoryRegionSection *mrs = haddr->v.io.section;
- return mrs->offset_within_address_space + haddr->v.io.offset;
- }
+ return haddr->phys_addr;
}
#endif
return 0;
@@ -XXX,XX +XXX,XX @@ const char *qemu_plugin_hwaddr_device_name(const struct qemu_plugin_hwaddr *h)
{
#ifdef CONFIG_SOFTMMU
if (h && h->is_io) {
- MemoryRegionSection *mrs = h->v.io.section;
- if (!mrs->mr->name) {
- unsigned long maddr = 0xffffffff & (uintptr_t) mrs->mr;
- g_autofree char *temp = g_strdup_printf("anon%08lx", maddr);
+ MemoryRegion *mr = h->mr;
+ if (!mr->name) {
+ unsigned maddr = (uintptr_t)mr;
+ g_autofree char *temp = g_strdup_printf("anon%08x", maddr);
return g_intern_string(temp);
} else {
- return g_intern_string(mrs->mr->name);
+ return g_intern_string(mr->name);
}
} else {
return g_intern_static_string("RAM");
--
2.34.1
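The phys_addr/mr pair stored above is consumed through the TCG plugin API. A rough sketch of a consumer is below, assuming a plugin whose memory callback was already registered with qemu_plugin_register_vcpu_mem_cb(); only the hwaddr query is shown, and the callback name is hypothetical:

    /* Hypothetical plugin memory callback; qemu_plugin_get_hwaddr() and
     * qemu_plugin_hwaddr_phys_addr() are the real plugin API entry points. */
    #include <qemu-plugin.h>

    static void vcpu_mem_cb(unsigned int cpu_index, qemu_plugin_meminfo_t info,
                            uint64_t vaddr, void *udata)
    {
        struct qemu_plugin_hwaddr *hw = qemu_plugin_get_hwaddr(info, vaddr);
        if (hw) {
            /* With the patch above this is a stored field, not a
             * RAMBlock/MemoryRegionSection recomputation. */
            uint64_t paddr = qemu_plugin_hwaddr_phys_addr(hw);
            (void)paddr;
        }
    }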
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

No code directly accesses CPUClass::write_elf*() handlers out
of hw/core/cpu.c (the rest are assignments in target/ code):

$ git grep -F -- '->write_elf'
hw/core/cpu.c:157: return (*cc->write_elf32_qemunote)(f, cpu, opaque);
hw/core/cpu.c:171: return (*cc->write_elf32_note)(f, cpu, cpuid, opaque);
hw/core/cpu.c:186: return (*cc->write_elf64_qemunote)(f, cpu, opaque);
hw/core/cpu.c:200: return (*cc->write_elf64_note)(f, cpu, cpuid, opaque);
hw/core/cpu.c:440: k->write_elf32_qemunote = cpu_common_write_elf32_qemunote;
hw/core/cpu.c:441: k->write_elf32_note = cpu_common_write_elf32_note;
hw/core/cpu.c:442: k->write_elf64_qemunote = cpu_common_write_elf64_qemunote;
hw/core/cpu.c:443: k->write_elf64_note = cpu_common_write_elf64_note;
target/arm/cpu.c:2304: cc->write_elf64_note = arm_cpu_write_elf64_note;
target/arm/cpu.c:2305: cc->write_elf32_note = arm_cpu_write_elf32_note;
target/i386/cpu.c:7425: cc->write_elf64_note = x86_cpu_write_elf64_note;
target/i386/cpu.c:7426: cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
target/i386/cpu.c:7427: cc->write_elf32_note = x86_cpu_write_elf32_note;
target/i386/cpu.c:7428: cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
target/ppc/translate_init.c.inc:10891: cc->write_elf64_note = ppc64_cpu_write_elf64_note;
target/ppc/translate_init.c.inc:10892: cc->write_elf32_note = ppc32_cpu_write_elf32_note;
target/s390x/cpu.c:522: cc->write_elf64_note = s390_cpu_write_elf64_note;

Check the handler presence in place and remove the common fallback code.

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210517105140.1062037-9-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
hw/core/cpu-common.c | 63 --------------------------------------------
hw/core/cpu-sysemu.c | 44 +++++++++++++++++++++++++++++++
2 files changed, 44 insertions(+), 63 deletions(-)

diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -XXX,XX +XXX,XX @@ void cpu_exit(CPUState *cpu)
qatomic_set(&cpu->icount_decr_ptr->u16.high, -1);
}

-int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
- void *opaque)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- return (*cc->write_elf32_qemunote)(f, cpu, opaque);
-}
-
-static int cpu_common_write_elf32_qemunote(WriteCoreDumpFunction f,
- CPUState *cpu, void *opaque)
-{
- return 0;
-}
-
-int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
- int cpuid, void *opaque)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- return (*cc->write_elf32_note)(f, cpu, cpuid, opaque);
-}
-
-static int cpu_common_write_elf32_note(WriteCoreDumpFunction f,
- CPUState *cpu, int cpuid,
- void *opaque)
-{
- return -1;
-}
-
-int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
- void *opaque)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- return (*cc->write_elf64_qemunote)(f, cpu, opaque);
-}
-
-static int cpu_common_write_elf64_qemunote(WriteCoreDumpFunction f,
- CPUState *cpu, void *opaque)
-{
- return 0;
-}
-
-int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
- int cpuid, void *opaque)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- return (*cc->write_elf64_note)(f, cpu, cpuid, opaque);
-}
-
-static int cpu_common_write_elf64_note(WriteCoreDumpFunction f,
- CPUState *cpu, int cpuid,
- void *opaque)
-{
- return -1;
-}
-
-
static int cpu_common_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
{
return 0;
@@ -XXX,XX +XXX,XX @@ static void cpu_class_init(ObjectClass *klass, void *data)
k->has_work = cpu_common_has_work;
k->get_paging_enabled = cpu_common_get_paging_enabled;
k->get_memory_mapping = cpu_common_get_memory_mapping;
- k->write_elf32_qemunote = cpu_common_write_elf32_qemunote;
- k->write_elf32_note = cpu_common_write_elf32_note;
- k->write_elf64_qemunote = cpu_common_write_elf64_qemunote;
- k->write_elf64_note = cpu_common_write_elf64_note;
k->gdb_read_register = cpu_common_gdb_read_register;
k->gdb_write_register = cpu_common_gdb_write_register;
set_bit(DEVICE_CATEGORY_CPU, dc->categories);
diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu-sysemu.c
+++ b/hw/core/cpu-sysemu.c
@@ -XXX,XX +XXX,XX @@ int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
return ret;
}

+int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
+ void *opaque)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ if (!cc->write_elf32_qemunote) {
+ return 0;
+ }
+ return (*cc->write_elf32_qemunote)(f, cpu, opaque);
+}
+
+int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
+ int cpuid, void *opaque)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ if (!cc->write_elf32_note) {
+ return -1;
+ }
+ return (*cc->write_elf32_note)(f, cpu, cpuid, opaque);
+}
+
+int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
+ void *opaque)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ if (!cc->write_elf64_qemunote) {
+ return 0;
+ }
+ return (*cc->write_elf64_qemunote)(f, cpu, opaque);
+}
+
+int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
+ int cpuid, void *opaque)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ if (!cc->write_elf64_note) {
+ return -1;
+ }
+ return (*cc->write_elf64_note)(f, cpu, cpuid, opaque);
+}
+
bool cpu_virtio_is_big_endian(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
--
2.25.1

Push computation down into the if statements to the point
the data is used.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 33 +++++++++++++--------------------
1 file changed, 13 insertions(+), 20 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
mmu_idx, retaddr);
}

-static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
- vaddr addr, unsigned size,
- MMUAccessType access_type,
- int mmu_idx, MemTxAttrs attrs,
- MemTxResult response,
- uintptr_t retaddr)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (!cpu->ignore_memory_transaction_failures &&
- cc->tcg_ops->do_transaction_failed) {
- cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
- access_type, mmu_idx, attrs,
- response, retaddr);
- }
-}
-
static MemoryRegionSection *
io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
@@ -XXX,XX +XXX,XX @@ static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
unsigned size, MMUAccessType access_type, int mmu_idx,
MemTxResult response, uintptr_t retaddr)
{
- hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
- cpu_transaction_failed(env_cpu(env), physaddr, addr, size, access_type,
- mmu_idx, full->attrs, response, retaddr);
+ CPUState *cpu = env_cpu(env);
+
+ if (!cpu->ignore_memory_transaction_failures) {
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ if (cc->tcg_ops->do_transaction_failed) {
+ hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
+
+ cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
+ access_type, mmu_idx,
+ full->attrs, response, retaddr);
+ }
+ }
}

static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
--
2.34.1
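The "push computation down" refactoring in the cputlb patch above is a general pattern worth naming: defer computing a value until the branch that actually consumes it, so the common path pays nothing. A minimal, generic C illustration (not QEMU code) of the before/after shape:

    /* before: physaddr is computed even when no hook will consume it */
    long before(long page, long addr, int enabled)
    {
        long physaddr = page | (addr & 0xfff);
        return enabled ? physaddr : 0;
    }

    /* after: the computation is sunk into the branch where it is used */
    long after(long page, long addr, int enabled)
    {
        if (enabled) {
            long physaddr = page | (addr & 0xfff);
            return physaddr;
        }
        return 0;
    }

In io_failed() the "enabled" tests are !cpu->ignore_memory_transaction_failures and the presence of the do_transaction_failed hook; the physaddr OR-merge only happens once both hold.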
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

Only the TCG accelerator uses the TranslationBlock API.
Move the tb-context.h / tb-hash.h / tb-lookup.h from the
global namespace to the TCG one (in accel/tcg).

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-Id: <20210524170453.3791436-3-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
{include/exec => accel/tcg}/tb-context.h | 0
{include/exec => accel/tcg}/tb-hash.h | 0
{include/exec => accel/tcg}/tb-lookup.h | 2 +-
accel/tcg/cpu-exec.c | 6 +++---
accel/tcg/cputlb.c | 2 +-
accel/tcg/tcg-runtime.c | 2 +-
accel/tcg/translate-all.c | 4 ++--
MAINTAINERS | 1 -
8 files changed, 8 insertions(+), 9 deletions(-)
rename {include/exec => accel/tcg}/tb-context.h (100%)
rename {include/exec => accel/tcg}/tb-hash.h (100%)
rename {include/exec => accel/tcg}/tb-lookup.h (98%)

diff --git a/include/exec/tb-context.h b/accel/tcg/tb-context.h
similarity index 100%
rename from include/exec/tb-context.h
rename to accel/tcg/tb-context.h
diff --git a/include/exec/tb-hash.h b/accel/tcg/tb-hash.h
similarity index 100%
rename from include/exec/tb-hash.h
rename to accel/tcg/tb-hash.h
diff --git a/include/exec/tb-lookup.h b/accel/tcg/tb-lookup.h
similarity index 98%
rename from include/exec/tb-lookup.h
rename to accel/tcg/tb-lookup.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/tb-lookup.h
+++ b/accel/tcg/tb-lookup.h
@@ -XXX,XX +XXX,XX @@
#endif

#include "exec/exec-all.h"
-#include "exec/tb-hash.h"
+#include "tb-hash.h"

/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -XXX,XX +XXX,XX @@
#include "qemu/compiler.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
-#include "exec/tb-hash.h"
-#include "exec/tb-lookup.h"
-#include "exec/tb-context.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
@@ -XXX,XX +XXX,XX @@
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"
+#include "tb-hash.h"
+#include "tb-lookup.h"
+#include "tb-context.h"
#include "internal.h"

/* -icount align implementation. */
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
-#include "exec/tb-hash.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
@@ -XXX,XX +XXX,XX @@
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "trace/mem.h"
+#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
diff --git a/accel/tcg/tcg-runtime.c b/accel/tcg/tcg-runtime.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/tcg-runtime.c
+++ b/accel/tcg/tcg-runtime.c
@@ -XXX,XX +XXX,XX @@
#include "disas/disas.h"
#include "exec/log.h"
#include "tcg/tcg.h"
-#include "exec/tb-lookup.h"
+#include "tb-lookup.h"

/* 32-bit helpers */

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -XXX,XX +XXX,XX @@
#endif

#include "exec/cputlb.h"
-#include "exec/tb-hash.h"
-#include "exec/tb-context.h"
#include "exec/translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"
@@ -XXX,XX +XXX,XX @@
#include "sysemu/tcg.h"
#include "qapi/error.h"
#include "hw/core/tcg-cpu-ops.h"
+#include "tb-hash.h"
+#include "tb-context.h"
#include "internal.h"

/* #define DEBUG_TB_INVALIDATE */
diff --git a/MAINTAINERS b/MAINTAINERS
index XXXXXXX..XXXXXXX 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -XXX,XX +XXX,XX @@ F: docs/devel/decodetree.rst
F: include/exec/cpu*.h
F: include/exec/exec-all.h
F: include/exec/helper*.h
-F: include/exec/tb-hash.h
F: include/sysemu/cpus.h
F: include/sysemu/tcg.h
F: include/hw/core/tcg-cpu-ops.h
--
2.25.1

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
- return io_readx(env, p->full, mmu_idx, p->addr, ra, type, MO_UB);
+ QEMU_IOTHREAD_LOCK_GUARD();
+ return do_ld_mmio_beN(env, p->full, 0, p->addr, 1, mmu_idx, type, ra);
} else {
return *(uint8_t *)p->haddr;
}
@@ -XXX,XX +XXX,XX @@ static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val,
int mmu_idx, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
- io_writex(env, p->full, mmu_idx, val, p->addr, ra, MO_UB);
+ QEMU_IOTHREAD_LOCK_GUARD();
+ do_st_mmio_leN(env, p->full, val, p->addr, 1, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
--
2.34.1
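QEMU_IOTHREAD_LOCK_GUARD(), used in the cputlb hunk above, takes the Big QEMU Lock for the rest of the enclosing scope and releases it automatically. As a rough illustration of how such a scoped guard can be built, here is a standalone sketch using the GCC/Clang cleanup attribute and a plain pthread mutex (QEMU's real guard lives in include/qemu/main-loop.h; everything named demo_* below is hypothetical):

    #include <pthread.h>

    static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

    static inline void demo_unlock(int *dummy)
    {
        (void)dummy;
        pthread_mutex_unlock(&demo_lock);
    }

    /* Lock now; unlock runs when _guard goes out of scope. */
    #define DEMO_LOCK_GUARD() \
        int _guard __attribute__((cleanup(demo_unlock))) = \
            (pthread_mutex_lock(&demo_lock), 0)

    static int counter;

    void bump(void)
    {
        DEMO_LOCK_GUARD();   /* releases automatically at every exit path */
        counter++;
    }

The appeal of the guard form in code like do_ld_1()/do_st_1() is exactly the early-return paths: the MMIO branch can return directly without a matching unlock call.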
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

Introduce the cpu_virtio_is_big_endian() generic helper to avoid
calling the CPUClass-internal virtio_is_big_endian() handler directly.

Similarly to commit bf7663c4bd8 ("cpu: introduce
CPUClass::virtio_is_big_endian()"), we keep 'virtio' in the method
name to hint that this handler shouldn't be called anywhere but from
the virtio code.

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210517105140.1062037-8-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/hw/core/cpu.h | 9 +++++++++
hw/core/cpu-common.c | 6 ------
hw/core/cpu-sysemu.c | 10 ++++++++++
hw/virtio/virtio.c | 4 +---
4 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
*/
int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs);

+/**
+ * cpu_virtio_is_big_endian:
+ * @cpu: CPU
+
+ * Returns %true if a CPU which supports runtime configurable endianness
+ * is currently big-endian.
+ */
+bool cpu_virtio_is_big_endian(CPUState *cpu);
+
#endif /* CONFIG_USER_ONLY */

/**
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -XXX,XX +XXX,XX @@ static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
return 0;
}

-static bool cpu_common_virtio_is_big_endian(CPUState *cpu)
-{
- return target_words_bigendian();
-}
-
void cpu_dump_state(CPUState *cpu, FILE *f, int flags)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
@@ -XXX,XX +XXX,XX @@ static void cpu_class_init(ObjectClass *klass, void *data)
k->write_elf64_note = cpu_common_write_elf64_note;
k->gdb_read_register = cpu_common_gdb_read_register;
k->gdb_write_register = cpu_common_gdb_write_register;
- k->virtio_is_big_endian = cpu_common_virtio_is_big_endian;
set_bit(DEVICE_CATEGORY_CPU, dc->categories);
dc->realize = cpu_common_realizefn;
dc->unrealize = cpu_common_unrealizefn;
diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu-sysemu.c
+++ b/hw/core/cpu-sysemu.c
@@ -XXX,XX +XXX,XX @@ int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
return ret;
}

+bool cpu_virtio_is_big_endian(CPUState *cpu)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ if (cc->virtio_is_big_endian) {
+ return cc->virtio_is_big_endian(cpu);
+ }
+ return target_words_bigendian();
+}
+
GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -XXX,XX +XXX,XX @@ static enum virtio_device_endian virtio_default_endian(void)

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
- CPUClass *cc = CPU_GET_CLASS(current_cpu);
-
- if (cc->virtio_is_big_endian(current_cpu)) {
+ if (cpu_virtio_is_big_endian(current_cpu)) {
return VIRTIO_DEVICE_ENDIAN_BIG;
} else {
return VIRTIO_DEVICE_ENDIAN_LITTLE;
--
2.25.1

Avoid multiple calls to io_prepare for unaligned accesses.
One call to do_ld_mmio_beN will never cross pages.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 84 +++++++++++++++++-----------------
1 file changed, 30 insertions(+), 54 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
}
}

-static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
- int mmu_idx, vaddr addr, uintptr_t retaddr,
- MMUAccessType access_type, MemOp op)
-{
- MemoryRegionSection *section;
- hwaddr mr_offset;
- MemoryRegion *mr;
- MemTxResult r;
- uint64_t val;
-
- section = io_prepare(&mr_offset, env, full->xlat_section,
- full->attrs, addr, retaddr);
- mr = section->mr;
-
- {
- QEMU_IOTHREAD_LOCK_GUARD();
- r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
- }
-
- if (r != MEMTX_OK) {
- io_failed(env, full, addr, memop_size(op), access_type, mmu_idx,
- r, retaddr);
- }
- return val;
-}
-
static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
int mmu_idx, uint64_t val, vaddr addr,
uintptr_t retaddr, MemOp op)
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
uint64_t ret_be, vaddr addr, int size,
int mmu_idx, MMUAccessType type, uintptr_t ra)
{
- uint64_t t;
+ MemoryRegionSection *section;
+ hwaddr mr_offset;
+ MemoryRegion *mr;
+ MemTxAttrs attrs;

tcg_debug_assert(size > 0 && size <= 8);
+
+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
do {
+ MemOp this_mop;
+ unsigned this_size;
+ uint64_t val;
+ MemTxResult r;
+
/* Read aligned pieces up to 8 bytes. */
- switch ((size | (int)addr) & 7) {
- case 1:
- case 3:
- case 5:
- case 7:
- t = io_readx(env, full, mmu_idx, addr, ra, type, MO_UB);
- ret_be = (ret_be << 8) | t;
- size -= 1;
- addr += 1;
- break;
- case 2:
- case 6:
- t = io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUW);
- ret_be = (ret_be << 16) | t;
- size -= 2;
- addr += 2;
- break;
- case 4:
- t = io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUL);
- ret_be = (ret_be << 32) | t;
- size -= 4;
- addr += 4;
- break;
- case 0:
- return io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUQ);
- default:
- qemu_build_not_reached();
+ this_mop = ctz32(size | (int)addr | 8);
+ this_size = 1 << this_mop;
+ this_mop |= MO_BE;
+
+ r = memory_region_dispatch_read(mr, mr_offset, &val, this_mop, attrs);
+ if (unlikely(r != MEMTX_OK)) {
+ io_failed(env, full, addr, this_size, type, mmu_idx, r, ra);
}
+ if (this_size == 8) {
+ return val;
+ }
+
+ ret_be = (ret_be << (this_size * 8)) | val;
+ addr += this_size;
+ mr_offset += this_size;
+ size -= this_size;
} while (size);
+
return ret_be;
}
--
2.34.1
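The ctz32(size | (int)addr | 8) expression above replaces the old switch: it picks, in one step, the largest power-of-two access that is no bigger than 8 bytes, fits in the remaining size, and keeps the address aligned. A standalone demo of just that computation, in plain C with __builtin_ctz standing in for QEMU's ctz32 and without the MemOp plumbing:

    #include <stdio.h>

    int main(void)
    {
        unsigned addr = 0x1003, size = 13;

        while (size) {
            /* lowest set bit of (size | addr | 8) bounds the chunk */
            unsigned log2 = __builtin_ctz(size | addr | 8);
            unsigned chunk = 1u << log2;
            printf("access %u byte(s) at 0x%x\n", chunk, addr);
            addr += chunk;
            size -= chunk;
        }
        return 0;
    }

For addr = 0x1003, size = 13 this emits a 1-byte access at 0x1003, a 4-byte access at 0x1004, and an 8-byte access at 0x1008: exactly the aligned-pieces decomposition the old switch enumerated case by case.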
From: Philippe Mathieu-Daudé <philmd@redhat.com>

To ease the file review, sort the declarations by the size of
the access (8, 16, 32). Simple code movement, no logical change.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-Id: <20210518183655.1711377-3-philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/memory_ldst_phys.h.inc | 78 ++++++++++++++---------------
1 file changed, 39 insertions(+), 39 deletions(-)

diff --git a/include/exec/memory_ldst_phys.h.inc b/include/exec/memory_ldst_phys.h.inc
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/memory_ldst_phys.h.inc
+++ b/include/exec/memory_ldst_phys.h.inc
@@ -XXX,XX +XXX,XX @@
*/

#ifdef TARGET_ENDIANNESS
+static inline uint32_t glue(lduw_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_lduw, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
static inline uint32_t glue(ldl_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
return glue(address_space_ldl, SUFFIX)(ARG1, addr,
@@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(ldq_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
MEMTXATTRS_UNSPECIFIED, NULL);
}

-static inline uint32_t glue(lduw_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+static inline void glue(stw_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
{
- return glue(address_space_lduw, SUFFIX)(ARG1, addr,
- MEMTXATTRS_UNSPECIFIED, NULL);
+ glue(address_space_stw, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
}

static inline void glue(stl_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
@@ -XXX,XX +XXX,XX @@ static inline void glue(stl_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
MEMTXATTRS_UNSPECIFIED, NULL);
}

-static inline void glue(stw_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
-{
- glue(address_space_stw, SUFFIX)(ARG1, addr, val,
- MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
static inline void glue(stq_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
{
glue(address_space_stq, SUFFIX)(ARG1, addr, val,
MEMTXATTRS_UNSPECIFIED, NULL);
}
#else
+static inline uint32_t glue(ldub_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_ldub, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline uint32_t glue(lduw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_lduw_le, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline uint32_t glue(lduw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
+{
+ return glue(address_space_lduw_be, SUFFIX)(ARG1, addr,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
static inline uint32_t glue(ldl_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
return glue(address_space_ldl_le, SUFFIX)(ARG1, addr,
@@ -XXX,XX +XXX,XX @@ static inline uint64_t glue(ldq_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
MEMTXATTRS_UNSPECIFIED, NULL);
}

-static inline uint32_t glue(ldub_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
-{
- return glue(address_space_ldub, SUFFIX)(ARG1, addr,
- MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-static inline uint32_t glue(lduw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
-{
- return glue(address_space_lduw_le, SUFFIX)(ARG1, addr,
- MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-static inline uint32_t glue(lduw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
-{
- return glue(address_space_lduw_be, SUFFIX)(ARG1, addr,
- MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-static inline void glue(stl_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
-{
- glue(address_space_stl_le, SUFFIX)(ARG1, addr, val,
- MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
-static inline void glue(stl_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
-{
- glue(address_space_stl_be, SUFFIX)(ARG1, addr, val,
- MEMTXATTRS_UNSPECIFIED, NULL);
-}
-
static inline void glue(stb_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
{
glue(address_space_stb, SUFFIX)(ARG1, addr, val,
@@ -XXX,XX +XXX,XX @@ static inline void glue(stw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t va
MEMTXATTRS_UNSPECIFIED, NULL);
}

+static inline void glue(stl_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
+{
+ glue(address_space_stl_le, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static inline void glue(stl_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
+{
+ glue(address_space_stl_be, SUFFIX)(ARG1, addr, val,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
static inline void glue(stq_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
{
glue(address_space_stq_le, SUFFIX)(ARG1, addr, val,
--
2.25.1

Avoid multiple calls to io_prepare for unaligned accesses.
One call to do_st_mmio_leN will never cross pages.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 82 +++++++++++++++++-----------------
1 file changed, 30 insertions(+), 52 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
}
}

-static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
- int mmu_idx, uint64_t val, vaddr addr,
- uintptr_t retaddr, MemOp op)
-{
- MemoryRegionSection *section;
- hwaddr mr_offset;
- MemoryRegion *mr;
- MemTxResult r;
-
- section = io_prepare(&mr_offset, env, full->xlat_section,
- full->attrs, addr, retaddr);
- mr = section->mr;
-
- {
- QEMU_IOTHREAD_LOCK_GUARD();
- r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
- }
-
- if (r != MEMTX_OK) {
- io_failed(env, full, addr, memop_size(op), MMU_DATA_STORE, mmu_idx,
- r, retaddr);
- }
-}
-
/* Return true if ADDR is present in the victim tlb, and has been copied
back to the main tlb. */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
uint64_t val_le, vaddr addr, int size,
int mmu_idx, uintptr_t ra)
{
+ MemoryRegionSection *section;
+ hwaddr mr_offset;
+ MemoryRegion *mr;
+ MemTxAttrs attrs;
+
tcg_debug_assert(size > 0 && size <= 8);

+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
do {
+ MemOp this_mop;
+ unsigned this_size;
+ MemTxResult r;
+
/* Store aligned pieces up to 8 bytes. */
- switch ((size | (int)addr) & 7) {
- case 1:
- case 3:
- case 5:
- case 7:
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_UB);
- val_le >>= 8;
- size -= 1;
- addr += 1;
- break;
- case 2:
- case 6:
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUW);
- val_le >>= 16;
- size -= 2;
- addr += 2;
- break;
- case 4:
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUL);
- val_le >>= 32;
- size -= 4;
- addr += 4;
- break;
- case 0:
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUQ);
- return 0;
- default:
- qemu_build_not_reached();
+ this_mop = ctz32(size | (int)addr | 8);
+ this_size = 1 << this_mop;
+ this_mop |= MO_LE;
+
+ r = memory_region_dispatch_write(mr, mr_offset, val_le,
+ this_mop, attrs);
+ if (unlikely(r != MEMTX_OK)) {
+ io_failed(env, full, addr, this_size, MMU_DATA_STORE,
+ mmu_idx, r, ra);
}
+ if (this_size == 8) {
+ return 0;
+ }
+
+ val_le >>= this_size * 8;
+ addr += this_size;
+ mr_offset += this_size;
+ size -= this_size;
} while (size);

return val_le;
--
2.34.1
From: Philippe Mathieu-Daudé <philmd@redhat.com>

Use uint8_t for (unsigned) byte.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-Id: <20210518183655.1711377-7-philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/memory.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/exec/memory.h b/include/exec/memory.h
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -XXX,XX +XXX,XX @@ static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
}

static inline void address_space_stb_cached(MemoryRegionCache *cache,
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+ hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len);
if (likely(cache->ptr)) {
--
2.25.1

Split out int_ld_mmio_beN, to be used by both do_ld_mmio_beN
and do_ld16_mmio_beN. Move the locks down into the two
functions, since each one now covers all accesses to one page.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 91 ++++++++++++++++++++++++++++++----------------
1 file changed, 59 insertions(+), 32 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
* Load @size bytes from @addr, which is memory-mapped i/o.
* The bytes are concatenated in big-endian order with @ret_be.
*/
-static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
- uint64_t ret_be, vaddr addr, int size,
- int mmu_idx, MMUAccessType type, uintptr_t ra)
+static uint64_t int_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
+ uint64_t ret_be, vaddr addr, int size,
+ int mmu_idx, MMUAccessType type, uintptr_t ra,
+ MemoryRegion *mr, hwaddr mr_offset)
{
- MemoryRegionSection *section;
- hwaddr mr_offset;
- MemoryRegion *mr;
- MemTxAttrs attrs;
-
- tcg_debug_assert(size > 0 && size <= 8);
-
- attrs = full->attrs;
- section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
- mr = section->mr;
-
do {
MemOp this_mop;
unsigned this_size;
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
this_size = 1 << this_mop;
this_mop |= MO_BE;

- r = memory_region_dispatch_read(mr, mr_offset, &val, this_mop, attrs);
+ r = memory_region_dispatch_read(mr, mr_offset, &val,
+ this_mop, full->attrs);
if (unlikely(r != MEMTX_OK)) {
io_failed(env, full, addr, this_size, type, mmu_idx, r, ra);
}
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
return ret_be;
}

+static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
+ uint64_t ret_be, vaddr addr, int size,
+ int mmu_idx, MMUAccessType type, uintptr_t ra)
+{
+ MemoryRegionSection *section;
+ MemoryRegion *mr;
+ hwaddr mr_offset;
+ MemTxAttrs attrs;
+ uint64_t ret;
+
+ tcg_debug_assert(size > 0 && size <= 8);
+
+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
+ qemu_mutex_lock_iothread();
+ ret = int_ld_mmio_beN(env, full, ret_be, addr, size, mmu_idx,
+ type, ra, mr, mr_offset);
+ qemu_mutex_unlock_iothread();
+
+ return ret;
+}
+
+static Int128 do_ld16_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
+ uint64_t ret_be, vaddr addr, int size,
+ int mmu_idx, uintptr_t ra)
+{
+ MemoryRegionSection *section;
+ MemoryRegion *mr;
+ hwaddr mr_offset;
+ MemTxAttrs attrs;
+ uint64_t a, b;
+
+ tcg_debug_assert(size > 8 && size <= 16);
+
+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
+ qemu_mutex_lock_iothread();
+ a = int_ld_mmio_beN(env, full, ret_be, addr, size - 8, mmu_idx,
+ MMU_DATA_LOAD, ra, mr, mr_offset);
+ b = int_ld_mmio_beN(env, full, ret_be, addr + size - 8, 8, mmu_idx,
+ MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
+ qemu_mutex_unlock_iothread();
+
+ return int128_make128(b, a);
+}
+
/**
* do_ld_bytes_beN
* @p: translation parameters
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p,
unsigned tmp, half_size;

if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
return do_ld_mmio_beN(env, p->full, ret_be, p->addr, p->size,
mmu_idx, type, ra);
}
@@ -XXX,XX +XXX,XX @@ static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p,
MemOp atom;

if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- a = do_ld_mmio_beN(env, p->full, a, p->addr, size - 8,
- mmu_idx, MMU_DATA_LOAD, ra);
- b = do_ld_mmio_beN(env, p->full, 0, p->addr + 8, 8,
- mmu_idx, MMU_DATA_LOAD, ra);
- return int128_make128(b, a);
+ return do_ld16_mmio_beN(env, p->full, a, p->addr, size, mmu_idx, ra);
}

/*
@@ -XXX,XX +XXX,XX @@ static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
return do_ld_mmio_beN(env, p->full, 0, p->addr, 1, mmu_idx, type, ra);
} else {
return *(uint8_t *)p->haddr;
@@ -XXX,XX +XXX,XX @@ static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
uint16_t ret;

if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 2, mmu_idx, type, ra);
if ((memop & MO_BSWAP) == MO_LE) {
ret = bswap16(ret);
@@ -XXX,XX +XXX,XX @@ static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
uint32_t ret;

if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 4, mmu_idx, type, ra);
if ((memop & MO_BSWAP) == MO_LE) {
ret = bswap32(ret);
@@ -XXX,XX +XXX,XX @@ static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
uint64_t ret;

if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 8, mmu_idx, type, ra);
if ((memop & MO_BSWAP) == MO_LE) {
ret = bswap64(ret);
@@ -XXX,XX +XXX,XX @@ static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
if (likely(!crosspage)) {
if (unlikely(l.page[0].flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- a = do_ld_mmio_beN(env, l.page[0].full, 0, addr, 8,
- l.mmu_idx, MMU_DATA_LOAD, ra);
- b = do_ld_mmio_beN(env, l.page[0].full, 0, addr + 8, 8,
- l.mmu_idx, MMU_DATA_LOAD, ra);
- ret = int128_make128(b, a);
+ ret = do_ld16_mmio_beN(env, l.page[0].full, 0, addr, 16,
+ l.mmu_idx, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = bswap128(ret);
}
--
2.34.1
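The lock movement above follows a familiar shape: instead of each caller taking the iothread lock around one sub-access, the wrapper takes it once around every sub-access to the same page. A generic sketch in plain pthreads (not QEMU code; all names hypothetical) of the resulting structure:

    #include <pthread.h>

    static pthread_mutex_t bql = PTHREAD_MUTEX_INITIALIZER;

    /* caller must hold bql; analogous to int_ld_mmio_beN() */
    static unsigned step_locked(unsigned acc, unsigned chunk)
    {
        return (acc << 8) | chunk;
    }

    /* analogous to do_ld_mmio_beN(): one lock for the whole page */
    unsigned read_all(const unsigned char *p, int n)
    {
        unsigned acc = 0;
        pthread_mutex_lock(&bql);
        for (int i = 0; i < n; i++) {
            acc = step_locked(acc, p[i]);
        }
        pthread_mutex_unlock(&bql);
        return acc;
    }

Besides shrinking lock traffic, this lets do_ld16_mmio_beN() perform both 8-byte halves of a 16-byte MMIO read atomically with respect to the Big QEMU Lock.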
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

To be able to later extract the cpu_get_phys_page_debug() and
cpu_asidx_from_attrs() handlers from CPUClass, un-inline them
from "hw/core/cpu.h".

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210517105140.1062037-7-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/hw/core/cpu.h | 33 ++++-----------------------------
hw/core/cpu-sysemu.c | 32 ++++++++++++++++++++++++++++++++
2 files changed, 36 insertions(+), 29 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ void cpu_dump_statistics(CPUState *cpu, int flags);
*
* Returns: Corresponding physical page address or -1 if no page found.
*/
-static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
- MemTxAttrs *attrs)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (cc->get_phys_page_attrs_debug) {
- return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
- }
- /* Fallback for CPUs which don't implement the _attrs_ hook */
- *attrs = MEMTXATTRS_UNSPECIFIED;
- return cc->get_phys_page_debug(cpu, addr);
-}
+hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
+ MemTxAttrs *attrs);

/**
* cpu_get_phys_page_debug:
@@ -XXX,XX +XXX,XX @@ static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
*
* Returns: Corresponding physical page address or -1 if no page found.
*/
-static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
-{
- MemTxAttrs attrs = {};
-
- return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
-}
+hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);

/** cpu_asidx_from_attrs:
* @cpu: CPU
@@ -XXX,XX +XXX,XX @@ static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
* Returns the address space index specifying the CPU AddressSpace
* to use for a memory access with the given transaction attributes.
*/
-static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
- int ret = 0;
-
- if (cc->asidx_from_attrs) {
- ret = cc->asidx_from_attrs(cpu, attrs);
- assert(ret < cpu->num_ases && ret >= 0);
- }
- return ret;
-}
+int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs);

#endif /* CONFIG_USER_ONLY */

diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu-sysemu.c
+++ b/hw/core/cpu-sysemu.c
@@ -XXX,XX +XXX,XX @@
#include "qapi/error.h"
#include "hw/core/cpu.h"

+hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
+ MemTxAttrs *attrs)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ if (cc->get_phys_page_attrs_debug) {
+ return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
+ }
+ /* Fallback for CPUs which don't implement the _attrs_ hook */
+ *attrs = MEMTXATTRS_UNSPECIFIED;
+ return cc->get_phys_page_debug(cpu, addr);
+}
+
+hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
+{
+ MemTxAttrs attrs = {};
+
+ return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
+}
+
+int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ int ret = 0;
+
+ if (cc->asidx_from_attrs) {
+ ret = cc->asidx_from_attrs(cpu, attrs);
+ assert(ret < cpu->num_ases && ret >= 0);
+ }
+ return ret;
+}
+
GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
--
2.25.1

Split out int_st_mmio_leN, to be used by both do_st_mmio_leN
and do_st16_mmio_leN. Move the locks down into the two
functions, since each one now covers all accesses to one page.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
accel/tcg/cputlb.c | 88 ++++++++++++++++++++++++++++----------------
1 file changed, 58 insertions(+), 30 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index XXXXXXX..XXXXXXX 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -XXX,XX +XXX,XX @@ Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
* The bytes to store are extracted in little-endian order from @val_le;
* return the bytes of @val_le beyond @p->size that have not been stored.
*/
-static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
- uint64_t val_le, vaddr addr, int size,
- int mmu_idx, uintptr_t ra)
+static uint64_t int_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+ uint64_t val_le, vaddr addr, int size,
+ int mmu_idx, uintptr_t ra,
+ MemoryRegion *mr, hwaddr mr_offset)
{
- MemoryRegionSection *section;
- hwaddr mr_offset;
- MemoryRegion *mr;
- MemTxAttrs attrs;
-
- tcg_debug_assert(size > 0 && size <= 8);
-
- attrs = full->attrs;
- section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
- mr = section->mr;
-
do {
MemOp this_mop;
unsigned this_size;
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
this_mop |= MO_LE;

r = memory_region_dispatch_write(mr, mr_offset, val_le,
- this_mop, attrs);
+ this_mop, full->attrs);
if (unlikely(r != MEMTX_OK)) {
io_failed(env, full, addr, this_size, MMU_DATA_STORE,
mmu_idx, r, ra);
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
return val_le;
}

+static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+ uint64_t val_le, vaddr addr, int size,
+ int mmu_idx, uintptr_t ra)
+{
+ MemoryRegionSection *section;
+ hwaddr mr_offset;
+ MemoryRegion *mr;
+ MemTxAttrs attrs;
+ uint64_t ret;
+
+ tcg_debug_assert(size > 0 && size <= 8);
+
+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
+ qemu_mutex_lock_iothread();
+ ret = int_st_mmio_leN(env, full, val_le, addr, size, mmu_idx,
+ ra, mr, mr_offset);
+ qemu_mutex_unlock_iothread();
+
+ return ret;
+}
+
+static uint64_t do_st16_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+ Int128 val_le, vaddr addr, int size,
+ int mmu_idx, uintptr_t ra)
+{
+ MemoryRegionSection *section;
+ MemoryRegion *mr;
+ hwaddr mr_offset;
+ MemTxAttrs attrs;
+ uint64_t ret;
+
+ tcg_debug_assert(size > 8 && size <= 16);
+
+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
+ qemu_mutex_lock_iothread();
+ int_st_mmio_leN(env, full, int128_getlo(val_le), addr, 8,
+ mmu_idx, ra, mr, mr_offset);
+ ret = int_st_mmio_leN(env, full, int128_gethi(val_le), addr + 8,
+ size - 8, mmu_idx, ra, mr, mr_offset + 8);
+ qemu_mutex_unlock_iothread();
+
+ return ret;
+}
+
/*
* Wrapper for the above.
*/
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
unsigned tmp, half_size;

if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
return do_st_mmio_leN(env, p->full, val_le, p->addr,
p->size, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
@@ -XXX,XX +XXX,XX @@ static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
MemOp atom;

if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- do_st_mmio_leN(env, p->full, int128_getlo(val_le),
- p->addr, 8, mmu_idx, ra);
- return do_st_mmio_leN(env, p->full, int128_gethi(val_le),
- p->addr + 8, size - 8, mmu_idx, ra);
+ return do_st16_mmio_leN(env, p->full, val_le, p->addr,
+ size, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
return int128_gethi(val_le) >> ((size - 8) * 8);
}
@@ -XXX,XX +XXX,XX @@ static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val,
int mmu_idx, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
do_st_mmio_leN(env, p->full, val, p->addr, 1, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
@@ -XXX,XX +XXX,XX @@ static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val,
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap16(val);
}
- QEMU_IOTHREAD_LOCK_GUARD();
do_st_mmio_leN(env, p->full, val, p->addr, 2, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
@@ -XXX,XX +XXX,XX @@ static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val,
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap32(val);
}
- QEMU_IOTHREAD_LOCK_GUARD();
do_st_mmio_leN(env, p->full, val, p->addr, 4, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
@@ -XXX,XX +XXX,XX @@ static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap64(val);
}
- QEMU_IOTHREAD_LOCK_GUARD();
do_st_mmio_leN(env, p->full, val, p->addr, 8, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
@@ -XXX,XX +XXX,XX @@ static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap128(val);
}
- a = int128_getlo(val);
- b = int128_gethi(val);
- QEMU_IOTHREAD_LOCK_GUARD();
- do_st_mmio_leN(env, l.page[0].full, a, addr, 8, l.mmu_idx, ra);
- do_st_mmio_leN(env, l.page[0].full, b, addr + 8, 8, l.mmu_idx, ra);
+ do_st16_mmio_leN(env, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
} else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
--
2.34.1
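do_st16_mmio_leN() above splits a 16-byte value into two little-endian 8-byte device writes: the Int128 low half goes out at offset 0, the high half at offset 8. A small standalone sketch of that byte layout in plain C, writing into a hypothetical buffer instead of issuing MMIO (int128_getlo()/int128_gethi() are QEMU's accessors; lo/hi play their role here):

    #include <stdint.h>

    /* Store a 128-bit value little-endian: low 64 bits first. */
    void store16_le(uint8_t *dst, uint64_t lo, uint64_t hi)
    {
        for (int i = 0; i < 8; i++) {
            dst[i] = lo >> (8 * i);       /* bytes 0..7: low half  */
            dst[8 + i] = hi >> (8 * i);   /* bytes 8..15: high half */
        }
    }

With the pair of int_st_mmio_leN() calls made under one lock acquisition, both halves reach the device back to back rather than with a lock release/reacquire in between.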
From: Philippe Mathieu-Daudé <philmd@redhat.com>

Use uint16_t for (unsigned) 16-bit word.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-Id: <20210518183655.1711377-6-philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/memory_ldst_cached.h.inc | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/exec/memory_ldst_cached.h.inc b/include/exec/memory_ldst_cached.h.inc
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/memory_ldst_cached.h.inc
+++ b/include/exec/memory_ldst_cached.h.inc
@@ -XXX,XX +XXX,XX @@
#define LD_P(size) \
glue(glue(ld, size), glue(ENDIANNESS, _p))

-static inline uint32_t ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
+static inline uint16_t ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len && 2 <= cache->len - addr);
@@ -XXX,XX +XXX,XX @@ static inline uint64_t ADDRESS_SPACE_LD_CACHED(q)(MemoryRegionCache *cache,
glue(glue(st, size), glue(ENDIANNESS, _p))

static inline void ADDRESS_SPACE_ST_CACHED(w)(MemoryRegionCache *cache,
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+ hwaddr addr, uint16_t val, MemTxAttrs attrs, MemTxResult *result)
{
assert(addr < cache->len && 2 <= cache->len - addr);
if (likely(cache->ptr)) {
--
2.25.1

From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>

We missed these functions when upstreaming the bfloat16 support.

Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Message-Id: <20230531065458.2082-1-zhiwei_liu@linux.alibaba.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/fpu/softfloat.h | 12 +++++++++
fpu/softfloat.c | 58 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 70 insertions(+)

diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index XXXXXXX..XXXXXXX 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -XXX,XX +XXX,XX @@ float32 bfloat16_to_float32(bfloat16, float_status *status);
bfloat16 float64_to_bfloat16(float64 a, float_status *status);
float64 bfloat16_to_float64(bfloat16 a, float_status *status);

+int8_t bfloat16_to_int8_scalbn(bfloat16, FloatRoundMode,
+ int, float_status *status);
int16_t bfloat16_to_int16_scalbn(bfloat16, FloatRoundMode,
int, float_status *status);
int32_t bfloat16_to_int32_scalbn(bfloat16, FloatRoundMode,
@@ -XXX,XX +XXX,XX @@ int32_t bfloat16_to_int32_scalbn(bfloat16, FloatRoundMode,
int64_t bfloat16_to_int64_scalbn(bfloat16, FloatRoundMode,
int, float_status *status);

+int8_t bfloat16_to_int8(bfloat16, float_status *status);
int16_t bfloat16_to_int16(bfloat16, float_status *status);
int32_t bfloat16_to_int32(bfloat16, float_status *status);
int64_t bfloat16_to_int64(bfloat16, float_status *status);

+int8_t bfloat16_to_int8_round_to_zero(bfloat16, float_status *status);
int16_t bfloat16_to_int16_round_to_zero(bfloat16, float_status *status);
int32_t bfloat16_to_int32_round_to_zero(bfloat16, float_status *status);
int64_t bfloat16_to_int64_round_to_zero(bfloat16, float_status *status);

+uint8_t bfloat16_to_uint8_scalbn(bfloat16 a, FloatRoundMode,
+ int, float_status *status);
uint16_t bfloat16_to_uint16_scalbn(bfloat16 a, FloatRoundMode,
int, float_status *status);
uint32_t bfloat16_to_uint32_scalbn(bfloat16 a, FloatRoundMode,
@@ -XXX,XX +XXX,XX @@ uint32_t bfloat16_to_uint32_scalbn(bfloat16 a, FloatRoundMode,
uint64_t bfloat16_to_uint64_scalbn(bfloat16 a, FloatRoundMode,
int, float_status *status);

+uint8_t bfloat16_to_uint8(bfloat16 a, float_status *status);
uint16_t bfloat16_to_uint16(bfloat16 a, float_status *status);
uint32_t bfloat16_to_uint32(bfloat16 a, float_status *status);
uint64_t bfloat16_to_uint64(bfloat16 a, float_status *status);

+uint8_t bfloat16_to_uint8_round_to_zero(bfloat16 a, float_status *status);
uint16_t bfloat16_to_uint16_round_to_zero(bfloat16 a, float_status *status);
uint32_t bfloat16_to_uint32_round_to_zero(bfloat16 a, float_status *status);
uint64_t bfloat16_to_uint64_round_to_zero(bfloat16 a, float_status *status);

+bfloat16 int8_to_bfloat16_scalbn(int8_t a, int, float_status *status);
bfloat16 int16_to_bfloat16_scalbn(int16_t a, int, float_status *status);
bfloat16 int32_to_bfloat16_scalbn(int32_t a, int, float_status *status);
bfloat16 int64_to_bfloat16_scalbn(int64_t a, int, float_status *status);
+bfloat16 uint8_to_bfloat16_scalbn(uint8_t a, int, float_status *status);
bfloat16 uint16_to_bfloat16_scalbn(uint16_t a, int, float_status *status);
bfloat16 uint32_to_bfloat16_scalbn(uint32_t a, int, float_status *status);
bfloat16 uint64_to_bfloat16_scalbn(uint64_t a, int, float_status *status);

+bfloat16 int8_to_bfloat16(int8_t a, float_status *status);
bfloat16 int16_to_bfloat16(int16_t a, float_status *status);
bfloat16 int32_to_bfloat16(int32_t a, float_status *status);
bfloat16 int64_to_bfloat16(int64_t a, float_status *status);
+bfloat16 uint8_to_bfloat16(uint8_t a, float_status *status);
bfloat16 uint16_to_bfloat16(uint16_t a, float_status *status);
bfloat16 uint32_to_bfloat16(uint32_t a, float_status *status);
bfloat16 uint64_to_bfloat16(uint64_t a, float_status *status);
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -XXX,XX +XXX,XX @@ int64_t float64_to_int64_scalbn(float64 a, FloatRoundMode rmode, int scale,
return parts_float_to_sint(&p, rmode, scale, INT64_MIN, INT64_MAX, s);
}

+int8_t bfloat16_to_int8_scalbn(bfloat16 a, FloatRoundMode rmode, int scale,
+ float_status *s)
+{
+ FloatParts64 p;
+
+ bfloat16_unpack_canonical(&p, a, s);
+ return parts_float_to_sint(&p, rmode, scale, INT8_MIN, INT8_MAX, s);
+}
+
int16_t bfloat16_to_int16_scalbn(bfloat16 a, FloatRoundMode rmode, int scale,
float_status *s)
{
@@ -XXX,XX +XXX,XX @@ int64_t floatx80_to_int64_round_to_zero(floatx80 a, float_status *s)
return floatx80_to_int64_scalbn(a, float_round_to_zero, 0, s);
}

+int8_t bfloat16_to_int8(bfloat16 a, float_status *s)
+{
+ return bfloat16_to_int8_scalbn(a, s->float_rounding_mode, 0, s);
+}
+
int16_t bfloat16_to_int16(bfloat16 a, float_status *s)
{
return bfloat16_to_int16_scalbn(a, s->float_rounding_mode, 0, s);
@@ -XXX,XX +XXX,XX @@ int64_t bfloat16_to_int64(bfloat16 a, float_status *s)
return bfloat16_to_int64_scalbn(a, s->float_rounding_mode, 0, s);
}

+int8_t bfloat16_to_int8_round_to_zero(bfloat16 a, float_status *s)
+{
+ return bfloat16_to_int8_scalbn(a, float_round_to_zero, 0, s);
+}
+
int16_t bfloat16_to_int16_round_to_zero(bfloat16 a, float_status *s)
{
return bfloat16_to_int16_scalbn(a, float_round_to_zero, 0, s);
@@ -XXX,XX +XXX,XX @@ uint64_t float64_to_uint64_scalbn(float64 a, FloatRoundMode rmode, int scale,
return parts_float_to_uint(&p, rmode, scale, UINT64_MAX, s);
}

+uint8_t bfloat16_to_uint8_scalbn(bfloat16 a, FloatRoundMode rmode,
+ int scale, float_status *s)
+{
+ FloatParts64 p;
+
+ bfloat16_unpack_canonical(&p, a, s);
+ return parts_float_to_uint(&p, rmode, scale, UINT8_MAX, s);
+}
+
uint16_t bfloat16_to_uint16_scalbn(bfloat16 a, FloatRoundMode rmode,
int scale, float_status *s)
{
@@ -XXX,XX +XXX,XX @@ Int128 float128_to_uint128_round_to_zero(float128 a, float_status *s)
return float128_to_uint128_scalbn(a, float_round_to_zero, 0, s);
}

+uint8_t bfloat16_to_uint8(bfloat16 a, float_status *s)
+{
+ return bfloat16_to_uint8_scalbn(a, s->float_rounding_mode, 0, s);
+}
+
uint16_t bfloat16_to_uint16(bfloat16 a, float_status *s)
{
return bfloat16_to_uint16_scalbn(a, s->float_rounding_mode, 0, s);
@@ -XXX,XX +XXX,XX @@ uint64_t bfloat16_to_uint64(bfloat16 a, float_status *s)
return bfloat16_to_uint64_scalbn(a, s->float_rounding_mode, 0, s);
}

+uint8_t bfloat16_to_uint8_round_to_zero(bfloat16 a, float_status *s)
+{
+ return bfloat16_to_uint8_scalbn(a, float_round_to_zero, 0, s);
+}
+
uint16_t bfloat16_to_uint16_round_to_zero(bfloat16 a, float_status *s)
{
return bfloat16_to_uint16_scalbn(a, float_round_to_zero, 0, s);
@@ -XXX,XX +XXX,XX @@ bfloat16 int16_to_bfloat16_scalbn(int16_t a, int scale, float_status *status)
return int64_to_bfloat16_scalbn(a, scale, status);
}

+bfloat16 int8_to_bfloat16_scalbn(int8_t a, int scale, float_status *status)
+{
+ return int64_to_bfloat16_scalbn(a, scale, status);
+}
+
bfloat16 int64_to_bfloat16(int64_t a, float_status *status)
{
return int64_to_bfloat16_scalbn(a, 0, status);
@@ -XXX,XX +XXX,XX @@ bfloat16 int16_to_bfloat16(int16_t a, float_status *status)
return int64_to_bfloat16_scalbn(a, 0, status);
}

+bfloat16 int8_to_bfloat16(int8_t a, float_status *status)
+{
+ return int64_to_bfloat16_scalbn(a, 0, status);
+}
+
float128 int128_to_float128(Int128 a, float_status *status)
{
FloatParts128 p = { };
@@ -XXX,XX +XXX,XX @@ bfloat16 uint16_to_bfloat16_scalbn(uint16_t a, int scale, float_status *status)
return uint64_to_bfloat16_scalbn(a, scale, status);
}

+bfloat16 uint8_to_bfloat16_scalbn(uint8_t a, int scale, float_status *status)
+{
+ return uint64_to_bfloat16_scalbn(a, scale, status);
+}
+
bfloat16 uint64_to_bfloat16(uint64_t a, float_status *status)
{
return uint64_to_bfloat16_scalbn(a, 0, status);
@@ -XXX,XX +XXX,XX @@ bfloat16 uint16_to_bfloat16(uint16_t a, float_status *status)
return uint64_to_bfloat16_scalbn(a, 0, status);
}

+bfloat16 uint8_to_bfloat16(uint8_t a, float_status *status)
+{
+ return uint64_to_bfloat16_scalbn(a, 0, status);
+}
+
float128 uint64_to_float128(uint64_t a, float_status *status)
{
FloatParts128 p;
--
2.34.1

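For callers, the new 8-bit entry points behave like the existing wider ones: rounding honours float_status, and out-of-range values saturate (parts_float_to_sint/parts_float_to_uint are invoked with INT8_MIN/INT8_MAX and UINT8_MAX bounds above). A minimal usage sketch, assuming a QEMU build context that provides "fpu/softfloat.h":

    #include "fpu/softfloat.h"

    static int8_t bf16_to_i8(bfloat16 x, float_status *st)
    {
        /* Rounds per st->float_rounding_mode; saturates to [-128, 127]
         * and raises the usual softfloat exception flags in *st. */
        return bfloat16_to_int8(x, st);
    }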
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

The current cpu.c contains sysemu-specific methods.
To avoid building them in user-mode builds, split the
current cpu.c into cpu-common.c / cpu-sysemu.c.

Start by moving cpu_get_crash_info().

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210517105140.1062037-6-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
hw/core/{cpu.c => cpu-common.c} | 17 -----------------
hw/core/cpu-sysemu.c | 34 +++++++++++++++++++++++++++++++++
hw/core/meson.build | 3 ++-
3 files changed, 36 insertions(+), 18 deletions(-)
rename hw/core/{cpu.c => cpu-common.c} (96%)
create mode 100644 hw/core/cpu-sysemu.c

diff --git a/hw/core/cpu.c b/hw/core/cpu-common.c
similarity index 96%
rename from hw/core/cpu.c
rename to hw/core/cpu-common.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/cpu.c
+++ b/hw/core/cpu-common.c
@@ -XXX,XX +XXX,XX @@ static bool cpu_common_virtio_is_big_endian(CPUState *cpu)
return target_words_bigendian();
}

-/*
- * XXX the following #if is always true because this is a common_ss
- * module, so target CONFIG_* is never defined.
- */
-#if !defined(CONFIG_USER_ONLY)
-GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
-{
- CPUClass *cc = CPU_GET_CLASS(cpu);
- GuestPanicInformation *res = NULL;
-
- if (cc->get_crash_info) {
- res = cc->get_crash_info(cpu);
- }
- return res;
-}
-#endif
-
void cpu_dump_state(CPUState *cpu, FILE *f, int flags)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/hw/core/cpu-sysemu.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * QEMU CPU model (system emulation specific)
+ *
+ * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/core/cpu.h"
+
+GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ GuestPanicInformation *res = NULL;
+
+ if (cc->get_crash_info) {
+ res = cc->get_crash_info(cpu);
+ }
+ return res;
+}
diff --git a/hw/core/meson.build b/hw/core/meson.build
index XXXXXXX..XXXXXXX 100644
--- a/hw/core/meson.build
+++ b/hw/core/meson.build
@@ -XXX,XX +XXX,XX @@ hwcore_files = files(
'qdev-clock.c',
)

-common_ss.add(files('cpu.c'))
+common_ss.add(files('cpu-common.c'))
common_ss.add(when: 'CONFIG_FITLOADER', if_true: files('loader-fit.c'))
common_ss.add(when: 'CONFIG_GENERIC_LOADER', if_true: files('generic-loader.c'))
common_ss.add(when: ['CONFIG_GUEST_LOADER', fdt], if_true: files('guest-loader.c'))
@@ -XXX,XX +XXX,XX @@ common_ss.add(when: 'CONFIG_SPLIT_IRQ', if_true: files('split-irq.c'))
common_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('stream.c'))

softmmu_ss.add(files(
+ 'cpu-sysemu.c',
'fw-path-provider.c',
'loader.c',
'machine-hmp-cmds.c',
--
2.25.1

Motorola treats denormals with the explicit integer bit set as
having unbiased exponent 0, unlike Intel, which treats them as
having unbiased exponent 1 (more like all other IEEE formats
that have no explicit integer bit).

Add a flag on FloatFmt to differentiate the behaviour.

Reported-by: Keith Packard <keithp@keithp.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
fpu/softfloat.c | 9 +++++-
tests/tcg/m68k/denormal.c | 53 ++++++++++++++++++++++++++++++++++
fpu/softfloat-parts.c.inc | 7 +++--
tests/tcg/m68k/Makefile.target | 2 +-
4 files changed, 66 insertions(+), 5 deletions(-)
create mode 100644 tests/tcg/m68k/denormal.c

diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -XXX,XX +XXX,XX @@ typedef struct {
* round_mask: bits below lsb which must be rounded
* The following optional modifiers are available:
* arm_althp: handle ARM Alternative Half Precision
+ * m68k_denormal: explicit integer bit for extended precision may be 1
*/
typedef struct {
int exp_size;
@@ -XXX,XX +XXX,XX @@ typedef struct {
int frac_size;
int frac_shift;
bool arm_althp;
+ bool m68k_denormal;
uint64_t round_mask;
} FloatFmt;

@@ -XXX,XX +XXX,XX @@ static const FloatFmt float128_params = {
static const FloatFmt floatx80_params[3] = {
[floatx80_precision_s] = { FLOATX80_PARAMS(23) },
[floatx80_precision_d] = { FLOATX80_PARAMS(52) },
- [floatx80_precision_x] = { FLOATX80_PARAMS(64) },
+ [floatx80_precision_x] = {
+ FLOATX80_PARAMS(64),
+#ifdef TARGET_M68K
+ .m68k_denormal = true,
+#endif
+ },
};

/* Unpack a float to parts, but do not canonicalize. */
diff --git a/tests/tcg/m68k/denormal.c b/tests/tcg/m68k/denormal.c
new file mode 100644
index XXXXXXX..XXXXXXX
--- /dev/null
+++ b/tests/tcg/m68k/denormal.c
@@ -XXX,XX +XXX,XX @@
+/*
+ * Test m68k extended double denormals.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+
+#define TEST(X, Y) { X, Y, X * Y }
+
+static volatile long double test[][3] = {
+ TEST(0x1p+16383l, 0x1p-16446l),
+ TEST(0x1.1p-8223l, 0x1.1p-8224l),
+ TEST(1.0l, 0x1p-16383l),
+};
+
+#undef TEST
+
+static void dump_ld(const char *label, long double ld)
+{
+ union {
+ long double d;
+ struct {
+ uint32_t exp:16;
+ uint32_t space:16;
+ uint32_t h;
+ uint32_t l;
+ };
+ } u;
+
+ u.d = ld;
+ printf("%12s: % -27La 0x%04x 0x%08x 0x%08x\n", label, u.d, u.exp, u.h, u.l);
+}
+
+int main(void)
+{
+ int i, n = sizeof(test) / sizeof(test[0]), err = 0;
+
+ for (i = 0; i < n; ++i) {
+ long double x = test[i][0];
+ long double y = test[i][1];
+ long double build_mul = test[i][2];
+ long double runtime_mul = x * y;
+
+ if (runtime_mul != build_mul) {
+ dump_ld("x", x);
+ dump_ld("y", y);
+ dump_ld("build_mul", build_mul);
+ dump_ld("runtime_mul", runtime_mul);
+ err = 1;
+ }
+ }
+ return err;
+}
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -XXX,XX +XXX,XX @@ static void partsN(canonicalize)(FloatPartsN *p, float_status *status,
} else {
int shift = frac_normalize(p);
p->cls = float_class_normal;
- p->exp = fmt->frac_shift - fmt->exp_bias - shift + 1;
+ p->exp = fmt->frac_shift - fmt->exp_bias
+ - shift + !fmt->m68k_denormal;
}
} else if (likely(p->exp < fmt->exp_max) || fmt->arm_althp) {
p->cls = float_class_normal;
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
is_tiny = !frac_addi(&discard, p, inc);
}

- frac_shrjam(p, 1 - exp);
+ frac_shrjam(p, !fmt->m68k_denormal - exp);

if (p->frac_lo & round_mask) {
/* Need to recompute round-to-even/round-to-odd. */
@@ -XXX,XX +XXX,XX @@ static void partsN(uncanon_normal)(FloatPartsN *p, float_status *s,
p->frac_lo &= ~round_mask;
}

- exp = (p->frac_hi & DECOMPOSED_IMPLICIT_BIT) != 0;
+ exp = (p->frac_hi & DECOMPOSED_IMPLICIT_BIT) && !fmt->m68k_denormal;
frac_shr(p, frac_shift);

if (is_tiny && (flags & float_flag_inexact)) {
diff --git a/tests/tcg/m68k/Makefile.target b/tests/tcg/m68k/Makefile.target
index XXXXXXX..XXXXXXX 100644
--- a/tests/tcg/m68k/Makefile.target
+++ b/tests/tcg/m68k/Makefile.target
@@ -XXX,XX +XXX,XX @@
#

VPATH += $(SRC_PATH)/tests/tcg/m68k
-TESTS += trap
+TESTS += trap denormal

# On m68k Linux supports 4k and 8k pages (but 8k is currently broken)
EXTRA_RUNS+=run-test-mmap-4096 # run-test-mmap-8192
--
2.34.1

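To make the convention difference concrete, here is a small decoding sketch (plain C, not part of the patch; it is only meaningful where long double is the 80-bit extended format, and extreme inputs may underflow on the host):

    #include <math.h>
    #include <stdint.h>

    /* Decode an 80-bit denormal (biased exponent field == 0; the explicit
     * integer bit lives in frac bit 63) under the two conventions. */
    static long double decode_x80_denormal(uint64_t frac, int m68k_style)
    {
        /* Motorola: unbiased exponent 0 - 16383 = -16383;
         * Intel:    unbiased exponent 1 - 16383 = -16382. */
        int unbiased = (m68k_style ? 0 : 1) - 16383;
        return ldexpl((long double)frac, unbiased - 63);
    }

This is exactly the off-by-one that !fmt->m68k_denormal supplies in the canonicalize and uncanon_normal changes above.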
From: Yasuo Kuwahara <kwhr00@gmail.com>

The last argument of tcg_out_extr() must be in the range 0-31 if ext==0.
Before the fix, when m==0 it becomes 32 and it crashes with an Illegal
instruction on Apple Silicon. After the fix, it will be 0. If m is in
the range 1-31, it is the same as before.

Signed-off-by: Yasuo Kuwahara <kwhr00@gmail.com>
Message-Id: <CAHfJ0vSXnmnTLmT0kR=a8ACRdw_UsLYOhStzUzgVEHoH8U-7sA@mail.gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target.c.inc | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_rotr(TCGContext *s, TCGType ext,
static inline void tcg_out_rotl(TCGContext *s, TCGType ext,
TCGReg rd, TCGReg rn, unsigned int m)
{
- int bits = ext ? 64 : 32;
- int max = bits - 1;
- tcg_out_extr(s, ext, rd, rn, rn, bits - (m & max));
+ int max = ext ? 63 : 31;
+ tcg_out_extr(s, ext, rd, rn, rn, -m & max);
}

static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd,
--
2.25.1

21
index XXXXXXX..XXXXXXX 100644
22
--- a/tcg/tcg.c
23
+++ b/tcg/tcg.c
24
@@ -XXX,XX +XXX,XX @@ static void tcg_register_jit_int(const void *buf, size_t size,
25
__attribute__((unused));
26
27
/* Forward declarations for functions declared and used in tcg-target.c.inc. */
28
+static void tcg_out_tb_start(TCGContext *s);
29
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
30
intptr_t arg2);
31
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
32
@@ -XXX,XX +XXX,XX @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
33
s->gen_insn_data =
34
tcg_malloc(sizeof(uint64_t) * s->gen_tb->icount * start_words);
35
36
+ tcg_out_tb_start(s);
37
+
38
num_insns = -1;
39
QTAILQ_FOREACH(op, &s->ops, link) {
40
TCGOpcode opc = op->opc;
15
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
41
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
16
index XXXXXXX..XXXXXXX 100644
42
index XXXXXXX..XXXXXXX 100644
17
--- a/tcg/aarch64/tcg-target.c.inc
43
--- a/tcg/aarch64/tcg-target.c.inc
18
+++ b/tcg/aarch64/tcg-target.c.inc
44
+++ b/tcg/aarch64/tcg-target.c.inc
19
@@ -XXX,XX +XXX,XX @@ static inline void tcg_out_rotr(TCGContext *s, TCGType ext,
45
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
20
static inline void tcg_out_rotl(TCGContext *s, TCGType ext,
46
tcg_out_insn(s, 3207, RET, TCG_REG_LR);
21
TCGReg rd, TCGReg rn, unsigned int m)
47
}
22
{
48
23
- int bits = ext ? 64 : 32;
49
+static void tcg_out_tb_start(TCGContext *s)
24
- int max = bits - 1;
50
+{
25
- tcg_out_extr(s, ext, rd, rn, rn, bits - (m & max));
51
+ /* nothing to do */
26
+ int max = ext ? 63 : 31;
52
+}
27
+ tcg_out_extr(s, ext, rd, rn, rn, -m & max);
53
+
28
}
54
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
29
55
{
30
static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd,
56
int i;
57
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
58
index XXXXXXX..XXXXXXX 100644
59
--- a/tcg/arm/tcg-target.c.inc
60
+++ b/tcg/arm/tcg-target.c.inc
61
@@ -XXX,XX +XXX,XX @@ static void tcg_out_epilogue(TCGContext *s)
62
(1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
63
}
64
65
+static void tcg_out_tb_start(TCGContext *s)
66
+{
67
+ /* nothing to do */
68
+}
69
+
70
typedef struct {
71
DebugFrameHeader h;
72
uint8_t fde_def_cfa[4];
73
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
74
index XXXXXXX..XXXXXXX 100644
75
--- a/tcg/i386/tcg-target.c.inc
76
+++ b/tcg/i386/tcg-target.c.inc
77
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
78
tcg_out_opc(s, OPC_RET, 0, 0, 0);
79
}
80
81
+static void tcg_out_tb_start(TCGContext *s)
82
+{
83
+ /* nothing to do */
84
+}
85
+
86
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
87
{
88
memset(p, 0x90, count);
89
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
90
index XXXXXXX..XXXXXXX 100644
91
--- a/tcg/loongarch64/tcg-target.c.inc
92
+++ b/tcg/loongarch64/tcg-target.c.inc
93
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
94
tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
95
}
96
97
+static void tcg_out_tb_start(TCGContext *s)
98
+{
99
+ /* nothing to do */
100
+}
101
+
102
static void tcg_target_init(TCGContext *s)
103
{
104
unsigned long hwcap = qemu_getauxval(AT_HWCAP);
105
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
106
index XXXXXXX..XXXXXXX 100644
107
--- a/tcg/mips/tcg-target.c.inc
108
+++ b/tcg/mips/tcg-target.c.inc
109
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
110
tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1);
111
}
112
113
+static void tcg_out_tb_start(TCGContext *s)
114
+{
115
+ /* nothing to do */
116
+}
117
+
118
static void tcg_target_init(TCGContext *s)
119
{
120
tcg_target_detect_isa();
121
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
122
index XXXXXXX..XXXXXXX 100644
123
--- a/tcg/ppc/tcg-target.c.inc
124
+++ b/tcg/ppc/tcg-target.c.inc
125
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
126
tcg_out32(s, BCLR | BO_ALWAYS);
127
}
128
129
+static void tcg_out_tb_start(TCGContext *s)
130
+{
131
+ /* nothing to do */
132
+}
133
+
134
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
135
{
136
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, arg);
137
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
138
index XXXXXXX..XXXXXXX 100644
139
--- a/tcg/riscv/tcg-target.c.inc
140
+++ b/tcg/riscv/tcg-target.c.inc
141
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
142
tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
143
}
144
145
+static void tcg_out_tb_start(TCGContext *s)
146
+{
147
+ /* nothing to do */
148
+}
149
+
150
static volatile sig_atomic_t got_sigill;
151
152
static void sigill_handler(int signo, siginfo_t *si, void *data)
153
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
154
index XXXXXXX..XXXXXXX 100644
155
--- a/tcg/s390x/tcg-target.c.inc
156
+++ b/tcg/s390x/tcg-target.c.inc
157
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
158
tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
159
}
160
161
+static void tcg_out_tb_start(TCGContext *s)
162
+{
163
+ /* nothing to do */
164
+}
165
+
166
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
167
{
168
memset(p, 0x07, count * sizeof(tcg_insn_unit));
169
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
170
index XXXXXXX..XXXXXXX 100644
171
--- a/tcg/sparc64/tcg-target.c.inc
172
+++ b/tcg/sparc64/tcg-target.c.inc
173
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
174
tcg_out_movi_s13(s, TCG_REG_O0, 0);
175
}
176
177
+static void tcg_out_tb_start(TCGContext *s)
178
+{
179
+ /* nothing to do */
180
+}
181
+
182
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
183
{
184
int i;
185
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
186
index XXXXXXX..XXXXXXX 100644
187
--- a/tcg/tci/tcg-target.c.inc
188
+++ b/tcg/tci/tcg-target.c.inc
189
@@ -XXX,XX +XXX,XX @@ static inline void tcg_target_qemu_prologue(TCGContext *s)
190
{
191
}
192
193
+static void tcg_out_tb_start(TCGContext *s)
194
+{
195
+ /* nothing to do */
196
+}
197
+
198
bool tcg_target_has_memory_bswap(MemOp memop)
199
{
200
return true;
31
--
201
--
32
2.25.1
202
2.34.1
33
203
34
204
From: Philippe Mathieu-Daudé <f4bug@amsat.org>

Migration is specific to system emulation.

- Move the CPUClass::vmsd field to SysemuCPUOps,
- restrict VMSTATE_CPU() macro to sysemu,
- vmstate_dummy is now unused, remove it.

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210517105140.1062037-16-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/hw/core/cpu.h | 8 ++------
include/hw/core/sysemu-cpu-ops.h | 6 ++++++
include/migration/vmstate.h | 2 --
cpu.c | 15 +++++++--------
stubs/vmstate.c | 2 --
target/arm/cpu.c | 2 +-
target/i386/cpu.c | 2 +-
target/mips/cpu.c | 2 +-
target/ppc/cpu_init.c | 2 +-
target/riscv/cpu.c | 2 +-
target/s390x/cpu.c | 2 +-
target/sparc/cpu.c | 2 +-
12 files changed, 22 insertions(+), 25 deletions(-)

diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -XXX,XX +XXX,XX @@ struct SysemuCPUOps;
* 32-bit VM coredump.
* @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
* note to a 32-bit VM coredump.
- * @legacy_vmsd: Legacy state description for migration.
- * Do not use in new targets, use #DeviceClass::vmsd instead.
* @gdb_num_core_regs: Number of core registers accessible to GDB.
* @gdb_core_xml_file: File name for core registers GDB XML description.
* @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
void *opaque);

- const VMStateDescription *legacy_vmsd;
const char *gdb_core_xml_file;
gchar * (*gdb_arch_name)(CPUState *cpu);
const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
@@ -XXX,XX +XXX,XX @@ bool target_words_bigendian(void);
#ifdef NEED_CPU_H

#ifdef CONFIG_SOFTMMU
+
extern const VMStateDescription vmstate_cpu_common;
-#else
-#define vmstate_cpu_common vmstate_dummy
-#endif

#define VMSTATE_CPU() { \
.name = "parent_obj", \
@@ -XXX,XX +XXX,XX @@ extern const VMStateDescription vmstate_cpu_common;
.flags = VMS_STRUCT, \
.offset = 0, \
}
+#endif /* CONFIG_SOFTMMU */

#endif /* NEED_CPU_H */

diff --git a/include/hw/core/sysemu-cpu-ops.h b/include/hw/core/sysemu-cpu-ops.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/core/sysemu-cpu-ops.h
+++ b/include/hw/core/sysemu-cpu-ops.h
@@ -XXX,XX +XXX,XX @@
* struct SysemuCPUOps: System operations specific to a CPU class
*/
typedef struct SysemuCPUOps {
+ /**
+ * @legacy_vmsd: Legacy state for migration.
+ * Do not use in new targets, use #DeviceClass::vmsd instead.
+ */
+ const VMStateDescription *legacy_vmsd;
+
} SysemuCPUOps;

#endif /* SYSEMU_CPU_OPS_H */
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index XXXXXXX..XXXXXXX 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -XXX,XX +XXX,XX @@ struct VMStateDescription {
const VMStateDescription **subsections;
};

-extern const VMStateDescription vmstate_dummy;
-
extern const VMStateInfo vmstate_info_bool;

extern const VMStateInfo vmstate_info_int8;
diff --git a/cpu.c b/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/cpu.c
+++ b/cpu.c
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_cpu_common = {

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
+#ifndef CONFIG_USER_ONLY
CPUClass *cc = CPU_GET_CLASS(cpu);
+#endif

cpu_list_add(cpu);
if (!accel_cpu_realizefn(cpu, errp)) {
@@ -XXX,XX +XXX,XX @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp)
#ifdef CONFIG_USER_ONLY
assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
qdev_get_vmsd(DEVICE(cpu))->unmigratable);
- assert(cc->legacy_vmsd == NULL);
#else
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
}
- if (cc->legacy_vmsd != NULL) {
- vmstate_register(NULL, cpu->cpu_index, cc->legacy_vmsd, cpu);
+ if (cc->sysemu_ops->legacy_vmsd != NULL) {
+ vmstate_register(NULL, cpu->cpu_index, cc->sysemu_ops->legacy_vmsd, cpu);
}
#endif /* CONFIG_USER_ONLY */
}

void cpu_exec_unrealizefn(CPUState *cpu)
{
+#ifndef CONFIG_USER_ONLY
CPUClass *cc = CPU_GET_CLASS(cpu);

-#ifdef CONFIG_USER_ONLY
- assert(cc->legacy_vmsd == NULL);
-#else
- if (cc->legacy_vmsd != NULL) {
- vmstate_unregister(NULL, cc->legacy_vmsd, cpu);
+ if (cc->sysemu_ops->legacy_vmsd != NULL) {
+ vmstate_unregister(NULL, cc->sysemu_ops->legacy_vmsd, cpu);
}
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
diff --git a/stubs/vmstate.c b/stubs/vmstate.c
index XXXXXXX..XXXXXXX 100644
--- a/stubs/vmstate.c
+++ b/stubs/vmstate.c
@@ -XXX,XX +XXX,XX @@
#include "qemu/osdep.h"
#include "migration/vmstate.h"

-const VMStateDescription vmstate_dummy = {};
-
int vmstate_register_with_alias_id(VMStateIf *obj,
uint32_t instance_id,
const VMStateDescription *vmsd,
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static gchar *arm_gdb_arch_name(CPUState *cs)
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps arm_sysemu_ops = {
+ .legacy_vmsd = &vmstate_arm_cpu,
};
#endif

@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
cc->asidx_from_attrs = arm_asidx_from_attrs;
- cc->legacy_vmsd = &vmstate_arm_cpu;
cc->virtio_is_big_endian = arm_cpu_virtio_is_big_endian;
cc->write_elf64_note = arm_cpu_write_elf64_note;
cc->write_elf32_note = arm_cpu_write_elf32_note;
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property x86_cpu_properties[] = {
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps i386_sysemu_ops = {
+ .legacy_vmsd = &vmstate_x86_cpu,
};
#endif

@@ -XXX,XX +XXX,XX @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
cc->write_elf32_note = x86_cpu_write_elf32_note;
cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
- cc->legacy_vmsd = &vmstate_x86_cpu;
cc->sysemu_ops = &i386_sysemu_ops;
#endif /* !CONFIG_USER_ONLY */

diff --git a/target/mips/cpu.c b/target/mips/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/mips/cpu.c
+++ b/target/mips/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property mips_cpu_properties[] = {
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps mips_sysemu_ops = {
+ .legacy_vmsd = &vmstate_mips_cpu,
};
#endif

@@ -XXX,XX +XXX,XX @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
cc->gdb_write_register = mips_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
- cc->legacy_vmsd = &vmstate_mips_cpu;
cc->sysemu_ops = &mips_sysemu_ops;
#endif
cc->disas_set_info = mips_cpu_disas_set_info;
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static Property ppc_cpu_properties[] = {
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps ppc_sysemu_ops = {
+ .legacy_vmsd = &vmstate_ppc_cpu,
};
#endif

@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_write_register = ppc_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug;
- cc->legacy_vmsd = &vmstate_ppc_cpu;
cc->sysemu_ops = &ppc_sysemu_ops;
#endif
#if defined(CONFIG_SOFTMMU)
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
+ .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif

@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
- cc->legacy_vmsd = &vmstate_riscv_cpu;
cc->sysemu_ops = &riscv_sysemu_ops;
cc->write_elf64_note = riscv_cpu_write_elf64_note;
cc->write_elf32_note = riscv_cpu_write_elf32_note;
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_reset_full(DeviceState *dev)
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps s390_sysemu_ops = {
+ .legacy_vmsd = &vmstate_s390_cpu,
};
#endif

@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_write_register = s390_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = s390_cpu_get_phys_page_debug;
- cc->legacy_vmsd = &vmstate_s390_cpu;
cc->get_crash_info = s390_cpu_get_crash_info;
cc->write_elf64_note = s390_cpu_write_elf64_note;
cc->sysemu_ops = &s390_sysemu_ops;
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/sparc/cpu.c
+++ b/target/sparc/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property sparc_cpu_properties[] = {
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps sparc_sysemu_ops = {
+ .legacy_vmsd = &vmstate_sparc_cpu,
};
#endif

@@ -XXX,XX +XXX,XX @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_write_register = sparc_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
- cc->legacy_vmsd = &vmstate_sparc_cpu;
cc->sysemu_ops = &sparc_sysemu_ops;
#endif
cc->disas_set_info = cpu_sparc_disas_set_info;
--
2.25.1

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
host/include/aarch64/host/cpuinfo.h | 1 +
util/cpuinfo-aarch64.c | 7 +++++++
2 files changed, 8 insertions(+)

diff --git a/host/include/aarch64/host/cpuinfo.h b/host/include/aarch64/host/cpuinfo.h
index XXXXXXX..XXXXXXX 100644
--- a/host/include/aarch64/host/cpuinfo.h
+++ b/host/include/aarch64/host/cpuinfo.h
@@ -XXX,XX +XXX,XX @@
#define CPUINFO_LSE (1u << 1)
#define CPUINFO_LSE2 (1u << 2)
#define CPUINFO_AES (1u << 3)
+#define CPUINFO_BTI (1u << 4)

/* Initialized with a constructor. */
extern unsigned cpuinfo;
diff --git a/util/cpuinfo-aarch64.c b/util/cpuinfo-aarch64.c
index XXXXXXX..XXXXXXX 100644
--- a/util/cpuinfo-aarch64.c
+++ b/util/cpuinfo-aarch64.c
@@ -XXX,XX +XXX,XX @@
# include <asm/hwcap.h>
# include "elf.h"
# endif
+# ifndef HWCAP2_BTI
+# define HWCAP2_BTI 0 /* added in glibc 2.32 */
+# endif
#endif
#ifdef CONFIG_DARWIN
# include <sys/sysctl.h>
@@ -XXX,XX +XXX,XX @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
info |= (hwcap & HWCAP_ATOMICS ? CPUINFO_LSE : 0);
info |= (hwcap & HWCAP_USCAT ? CPUINFO_LSE2 : 0);
info |= (hwcap & HWCAP_AES ? CPUINFO_AES: 0);
+
+ unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);
+ info |= (hwcap2 & HWCAP2_BTI ? CPUINFO_BTI : 0);
#endif

#ifdef CONFIG_DARWIN
info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE") * CPUINFO_LSE;
info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE2") * CPUINFO_LSE2;
info |= sysctl_for_bool("hw.optional.arm.FEAT_AES") * CPUINFO_AES;
+ info |= sysctl_for_bool("hw.optional.arm.FEAT_BTI") * CPUINFO_BTI;
#endif

cpuinfo = info;
--
2.34.1

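Outside of QEMU the same detection can be reproduced in a few lines; a minimal standalone sketch for a Linux aarch64 host (the fallback HWCAP2_BTI value here is taken from the Linux arm64 UAPI headers and is an assumption of this sketch, not part of the patch):

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP2_BTI
    #define HWCAP2_BTI (1 << 17)
    #endif

    int main(void)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);
        printf("BTI %s\n", (hwcap2 & HWCAP2_BTI) ? "supported" : "not supported");
        return 0;
    }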
From: Philippe Mathieu-Daudé <philmd@redhat.com>

To ease the file review, sort the declarations by the size of
the access (8, 16, 32). Simple code movement, no logical change.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-Id: <20210518183655.1711377-2-philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/memory_ldst_cached.h.inc | 46 +++++++++++++--------------
1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/include/exec/memory_ldst_cached.h.inc b/include/exec/memory_ldst_cached.h.inc
index XXXXXXX..XXXXXXX 100644
--- a/include/exec/memory_ldst_cached.h.inc
+++ b/include/exec/memory_ldst_cached.h.inc
@@ -XXX,XX +XXX,XX @@
#define LD_P(size) \
glue(glue(ld, size), glue(ENDIANNESS, _p))

+static inline uint32_t ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len && 2 <= cache->len - addr);
+ fuzz_dma_read_cb(cache->xlat + addr, 2, cache->mrs.mr);
+ if (likely(cache->ptr)) {
+ return LD_P(uw)(cache->ptr + addr);
+ } else {
+ return ADDRESS_SPACE_LD_CACHED_SLOW(uw)(cache, addr, attrs, result);
+ }
+}
+
static inline uint32_t ADDRESS_SPACE_LD_CACHED(l)(MemoryRegionCache *cache,
hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
@@ -XXX,XX +XXX,XX @@ static inline uint64_t ADDRESS_SPACE_LD_CACHED(q)(MemoryRegionCache *cache,
}
}

-static inline uint32_t ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
- hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
-{
- assert(addr < cache->len && 2 <= cache->len - addr);
- fuzz_dma_read_cb(cache->xlat + addr, 2, cache->mrs.mr);
- if (likely(cache->ptr)) {
- return LD_P(uw)(cache->ptr + addr);
- } else {
- return ADDRESS_SPACE_LD_CACHED_SLOW(uw)(cache, addr, attrs, result);
- }
-}
-
#undef ADDRESS_SPACE_LD_CACHED
#undef ADDRESS_SPACE_LD_CACHED_SLOW
#undef LD_P
@@ -XXX,XX +XXX,XX @@ static inline uint32_t ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
#define ST_P(size) \
glue(glue(st, size), glue(ENDIANNESS, _p))

-static inline void ADDRESS_SPACE_ST_CACHED(l)(MemoryRegionCache *cache,
- hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
-{
- assert(addr < cache->len && 4 <= cache->len - addr);
- if (likely(cache->ptr)) {
- ST_P(l)(cache->ptr + addr, val);
- } else {
- ADDRESS_SPACE_ST_CACHED_SLOW(l)(cache, addr, val, attrs, result);
- }
-}
-
static inline void ADDRESS_SPACE_ST_CACHED(w)(MemoryRegionCache *cache,
hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
@@ -XXX,XX +XXX,XX @@ static inline void ADDRESS_SPACE_ST_CACHED(w)(MemoryRegionCache *cache,
+static inline void ADDRESS_SPACE_ST_CACHED(l)(MemoryRegionCache *cache,
+ hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len && 4 <= cache->len - addr);
+ if (likely(cache->ptr)) {
+ ST_P(l)(cache->ptr + addr, val);
+ } else {
+ ADDRESS_SPACE_ST_CACHED_SLOW(l)(cache, addr, val, attrs, result);
+ }
+}
+
static inline void ADDRESS_SPACE_ST_CACHED(q)(MemoryRegionCache *cache,
hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
--
2.25.1

The prologue is entered via "call"; the epilogue, each tb,
and each goto_tb continuation point are all reached via "jump".

As tcg_out_goto_long is only used by tcg_out_exit_tb, merge
the two functions. Change the indirect register used to
TCG_REG_TMP1, aka X17, so that the BTI condition created
is "jump" instead of "jump or call".

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/aarch64/tcg-target.c.inc | 54 ++++++++++++++++++++++++++----------
1 file changed, 39 insertions(+), 15 deletions(-)

diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -XXX,XX +XXX,XX @@ typedef enum {
DMB_ISH = 0xd50338bf,
DMB_LD = 0x00000100,
DMB_ST = 0x00000200,
+
+ BTI_C = 0xd503245f,
+ BTI_J = 0xd503249f,
+ BTI_JC = 0xd50324df,
} AArch64Insn;

static inline uint32_t tcg_in32(TCGContext *s)
@@ -XXX,XX +XXX,XX @@ static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn,
| rn << 5 | (rd & 0x1f));
}

+static void tcg_out_bti(TCGContext *s, AArch64Insn insn)
+{
+ /*
+ * While BTI insns are nops on hosts without FEAT_BTI,
+ * there is no point in emitting them in that case either.
+ */
+ if (cpuinfo & CPUINFO_BTI) {
+ tcg_out32(s, insn);
+ }
+}
+
/* Register to register move using ORR (shifted register with no shift). */
static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rm)
{
@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
tcg_out_insn(s, 3206, B, offset);
}

-static void tcg_out_goto_long(TCGContext *s, const tcg_insn_unit *target)
-{
- ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
- if (offset == sextract64(offset, 0, 26)) {
- tcg_out_insn(s, 3206, B, offset);
- } else {
- /* Choose X9 as a call-clobbered non-LR temporary. */
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X9, (intptr_t)target);
- tcg_out_insn(s, 3207, BR, TCG_REG_X9);
- }
-}
-
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *target)
{
ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
@@ -XXX,XX +XXX,XX @@ static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
+ const tcg_insn_unit *target;
+ ptrdiff_t offset;
+
/* Reuse the zeroing that exists for goto_ptr. */
if (a0 == 0) {
- tcg_out_goto_long(s, tcg_code_gen_epilogue);
+ target = tcg_code_gen_epilogue;
} else {
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0);
- tcg_out_goto_long(s, tb_ret_addr);
+ target = tb_ret_addr;
+ }
+
+ offset = tcg_pcrel_diff(s, target) >> 2;
+ if (offset == sextract64(offset, 0, 26)) {
+ tcg_out_insn(s, 3206, B, offset);
+ } else {
+ /*
+ * Only x16/x17 generate BTI type Jump (2),
+ * other registers generate BTI type Jump|Call (3).
+ */
+ QEMU_BUILD_BUG_ON(TCG_REG_TMP0 != TCG_REG_X16);
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, (intptr_t)target);
+ tcg_out_insn(s, 3207, BR, TCG_REG_TMP0);
}
}

@@ -XXX,XX +XXX,XX @@ static void tcg_out_goto_tb(TCGContext *s, int which)
tcg_out32(s, I3206_B);
tcg_out_insn(s, 3207, BR, TCG_REG_TMP0);
set_jmp_reset_offset(s, which);
+ tcg_out_bti(s, BTI_J);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
{
TCGReg r;

+ tcg_out_bti(s, BTI_C);
+
/* Push (FP, LR) and allocate space for all saved registers. */
tcg_out_insn(s, 3314, STP, TCG_REG_FP, TCG_REG_LR,
TCG_REG_SP, -PUSH_SIZE, 1, 1);
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)
* and fall through to the rest of the epilogue.
*/
tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
+ tcg_out_bti(s, BTI_J);
tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_X0, 0);

/* TB epilogue */
tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
+ tcg_out_bti(s, BTI_J);

/* Remove TCG locals stack space. */
tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP,
@@ -XXX,XX +XXX,XX @@ static void tcg_target_qemu_prologue(TCGContext *s)

static void tcg_out_tb_start(TCGContext *s)
{
- /* nothing to do */
+ tcg_out_bti(s, BTI_J);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
--
2.34.1

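For readers unfamiliar with FEAT_BTI, a short summary of the three landing-pad encodings the patch introduces (values copied from the patch above; the comments paraphrase the commit message):

    /* AArch64 BTI landing pads as emitted by tcg_out_bti() above. */
    enum {
        BTI_C  = 0xd503245f,  /* "bti c":  valid target for indirect calls (BLR) */
        BTI_J  = 0xd503249f,  /* "bti j":  valid target for indirect jumps (BR)  */
        BTI_JC = 0xd50324df,  /* "bti jc": valid target for either               */
    };

Indirect branches through x16/x17 produce branch type "jump" rather than "jump or call", which is why tcg_out_exit_tb pins its scratch register with QEMU_BUILD_BUG_ON(TCG_REG_TMP0 != TCG_REG_X16).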
For a Linux aarch64 host supporting BTI, map the buffer
so that BTI instructions are required at branch landing pads.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/region.c | 41 ++++++++++++++++++++++++++++++-----------
1 file changed, 30 insertions(+), 11 deletions(-)

diff --git a/tcg/region.c b/tcg/region.c
index XXXXXXX..XXXXXXX 100644
--- a/tcg/region.c
+++ b/tcg/region.c
@@ -XXX,XX +XXX,XX @@
#include "tcg/tcg.h"
#include "exec/translation-block.h"
#include "tcg-internal.h"
+#include "host/cpuinfo.h"


+/*
+ * Local source-level compatibility with Unix.
+ * Used by tcg_region_init below.
+ */
+#if defined(_WIN32)
+#define PROT_READ 1
+#define PROT_WRITE 2
+#define PROT_EXEC 4
+#endif
+
struct tcg_region_tree {
QemuMutex lock;
QTree *tree;
@@ -XXX,XX +XXX,XX @@ bool in_code_gen_buffer(const void *p)
return (size_t)(p - region.start_aligned) <= region.total_size;
}

+#ifndef CONFIG_TCG_INTERPRETER
+static int host_prot_read_exec(void)
+{
+#if defined(CONFIG_LINUX) && defined(HOST_AARCH64) && defined(PROT_BTI)
+ if (cpuinfo & CPUINFO_BTI) {
+ return PROT_READ | PROT_EXEC | PROT_BTI;
+ }
+#endif
+ return PROT_READ | PROT_EXEC;
+}
+#endif
+
#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw)
{
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
return PROT_READ | PROT_WRITE;
}
#elif defined(_WIN32)
-/*
- * Local source-level compatibility with Unix.
- * Used by tcg_region_init below.
- */
-#define PROT_READ 1
-#define PROT_WRITE 2
-#define PROT_EXEC 4
-
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
void *buf;
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
goto fail;
}

- buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
+ buf_rx = mmap(NULL, size, host_prot_read_exec(), MAP_SHARED, fd, 0);
if (buf_rx == MAP_FAILED) {
goto fail_rx;
}
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
return -1;
}

- if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
+ if (mprotect((void *)buf_rx, size, host_prot_read_exec()) != 0) {
error_setg_errno(errp, errno, "mprotect for jit splitwx");
munmap((void *)buf_rx, size);
munmap((void *)buf_rw, size);
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
need_prot = PROT_READ | PROT_WRITE;
#ifndef CONFIG_TCG_INTERPRETER
if (tcg_splitwx_diff == 0) {
- need_prot |= PROT_EXEC;
+ need_prot |= host_prot_read_exec();
}
#endif
for (size_t i = 0, n = region.n; i < n; i++) {
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
} else if (need_prot == (PROT_READ | PROT_WRITE)) {
rc = qemu_mprotect_rw(start, end - start);
} else {
+#ifdef CONFIG_POSIX
+ rc = mprotect(start, end - start, need_prot);
+#else
g_assert_not_reached();
+#endif
}
if (rc) {
error_setg_errno(&error_fatal, errno,
21
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
10
diff --git a/tcg/region.c b/tcg/region.c
22
index XXXXXXX..XXXXXXX 100644
11
index XXXXXXX..XXXXXXX 100644
23
--- a/include/hw/core/cpu.h
12
--- a/tcg/region.c
24
+++ b/include/hw/core/cpu.h
13
+++ b/tcg/region.c
25
@@ -XXX,XX +XXX,XX @@ struct SysemuCPUOps;
14
@@ -XXX,XX +XXX,XX @@
26
* a memory access with the specified memory transaction attributes.
15
#include "tcg/tcg.h"
27
* @gdb_read_register: Callback for letting GDB read a register.
16
#include "exec/translation-block.h"
28
* @gdb_write_register: Callback for letting GDB write a register.
17
#include "tcg-internal.h"
29
- * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
18
+#include "host/cpuinfo.h"
30
- * 64-bit VM coredump.
19
31
- * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
20
32
- * note to a 32-bit VM coredump.
21
+/*
33
- * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
22
+ * Local source-level compatibility with Unix.
34
- * 32-bit VM coredump.
23
+ * Used by tcg_region_init below.
35
- * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
24
+ */
36
- * note to a 32-bit VM coredump.
25
+#if defined(_WIN32)
37
* @gdb_num_core_regs: Number of core registers accessible to GDB.
26
+#define PROT_READ 1
38
* @gdb_core_xml_file: File name for core registers GDB XML description.
27
+#define PROT_WRITE 2
39
* @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
28
+#define PROT_EXEC 4
40
@@ -XXX,XX +XXX,XX @@ struct CPUClass {
29
+#endif
41
int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
30
+
42
int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
31
struct tcg_region_tree {
43
32
QemuMutex lock;
44
- int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
33
QTree *tree;
45
- int cpuid, void *opaque);
34
@@ -XXX,XX +XXX,XX @@ bool in_code_gen_buffer(const void *p)
46
- int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
35
return (size_t)(p - region.start_aligned) <= region.total_size;
47
- void *opaque);
36
}
48
- int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
37
49
- int cpuid, void *opaque);
38
+#ifndef CONFIG_TCG_INTERPRETER
50
- int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
39
+static int host_prot_read_exec(void)
51
- void *opaque);
40
+{
41
+#if defined(CONFIG_LINUX) && defined(HOST_AARCH64) && defined(PROT_BTI)
42
+ if (cpuinfo & CPUINFO_BTI) {
43
+ return PROT_READ | PROT_EXEC | PROT_BTI;
44
+ }
45
+#endif
46
+ return PROT_READ | PROT_EXEC;
47
+}
48
+#endif
49
+
50
#ifdef CONFIG_DEBUG_TCG
51
const void *tcg_splitwx_to_rx(void *rw)
52
{
53
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
54
return PROT_READ | PROT_WRITE;
55
}
56
#elif defined(_WIN32)
57
-/*
58
- * Local source-level compatibility with Unix.
59
- * Used by tcg_region_init below.
60
- */
61
-#define PROT_READ 1
62
-#define PROT_WRITE 2
63
-#define PROT_EXEC 4
52
-
64
-
53
const char *gdb_core_xml_file;
65
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
54
gchar * (*gdb_arch_name)(CPUState *cpu);
55
const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
56
diff --git a/include/hw/core/sysemu-cpu-ops.h b/include/hw/core/sysemu-cpu-ops.h
57
index XXXXXXX..XXXXXXX 100644
58
--- a/include/hw/core/sysemu-cpu-ops.h
59
+++ b/include/hw/core/sysemu-cpu-ops.h
60
@@ -XXX,XX +XXX,XX @@ typedef struct SysemuCPUOps {
61
* GUEST_PANICKED events.
62
*/
63
GuestPanicInformation* (*get_crash_info)(CPUState *cpu);
64
+ /**
65
+ * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
66
+ * 32-bit VM coredump.
67
+ */
68
+ int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
69
+ int cpuid, void *opaque);
70
+ /**
71
+ * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
72
+ * 64-bit VM coredump.
73
+ */
74
+ int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
75
+ int cpuid, void *opaque);
76
+ /**
77
+ * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
78
+ * note to a 32-bit VM coredump.
79
+ */
80
+ int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
81
+ void *opaque);
82
+ /**
83
+ * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
84
+ * note to a 64-bit VM coredump.
85
+ */
86
+ int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
87
+ void *opaque);
88
/**
89
* @virtio_is_big_endian: Callback to return %true if a CPU which supports
90
* runtime configurable endianness is currently big-endian.
91
diff --git a/hw/core/cpu-sysemu.c b/hw/core/cpu-sysemu.c
92
index XXXXXXX..XXXXXXX 100644
93
--- a/hw/core/cpu-sysemu.c
94
+++ b/hw/core/cpu-sysemu.c
95
@@ -XXX,XX +XXX,XX @@ int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
96
{
66
{
97
CPUClass *cc = CPU_GET_CLASS(cpu);
67
void *buf;
98
68
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
99
- if (!cc->write_elf32_qemunote) {
69
goto fail;
100
+ if (!cc->sysemu_ops->write_elf32_qemunote) {
101
return 0;
102
}
70
}
103
- return (*cc->write_elf32_qemunote)(f, cpu, opaque);
71
104
+ return (*cc->sysemu_ops->write_elf32_qemunote)(f, cpu, opaque);
72
- buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
105
}
73
+ buf_rx = mmap(NULL, size, host_prot_read_exec(), MAP_SHARED, fd, 0);
106
74
if (buf_rx == MAP_FAILED) {
107
int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
75
goto fail_rx;
108
@@ -XXX,XX +XXX,XX @@ int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
76
}
109
{
77
@@ -XXX,XX +XXX,XX @@ static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
110
CPUClass *cc = CPU_GET_CLASS(cpu);
111
112
- if (!cc->write_elf32_note) {
113
+ if (!cc->sysemu_ops->write_elf32_note) {
114
return -1;
78
return -1;
115
}
79
}
116
- return (*cc->write_elf32_note)(f, cpu, cpuid, opaque);
80
117
+ return (*cc->sysemu_ops->write_elf32_note)(f, cpu, cpuid, opaque);
81
- if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
118
}
82
+ if (mprotect((void *)buf_rx, size, host_prot_read_exec()) != 0) {
119
83
error_setg_errno(errp, errno, "mprotect for jit splitwx");
120
int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
84
munmap((void *)buf_rx, size);
121
@@ -XXX,XX +XXX,XX @@ int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
85
munmap((void *)buf_rw, size);
122
{
86
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
123
CPUClass *cc = CPU_GET_CLASS(cpu);
87
need_prot = PROT_READ | PROT_WRITE;
124
88
#ifndef CONFIG_TCG_INTERPRETER
125
- if (!cc->write_elf64_qemunote) {
89
if (tcg_splitwx_diff == 0) {
126
+ if (!cc->sysemu_ops->write_elf64_qemunote) {
90
- need_prot |= PROT_EXEC;
127
return 0;
91
+ need_prot |= host_prot_read_exec();
128
}
92
}
129
- return (*cc->write_elf64_qemunote)(f, cpu, opaque);
130
+ return (*cc->sysemu_ops->write_elf64_qemunote)(f, cpu, opaque);
131
}
132
133
int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
134
@@ -XXX,XX +XXX,XX @@ int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
135
{
136
CPUClass *cc = CPU_GET_CLASS(cpu);
137
138
- if (!cc->write_elf64_note) {
139
+ if (!cc->sysemu_ops->write_elf64_note) {
140
return -1;
141
}
142
- return (*cc->write_elf64_note)(f, cpu, cpuid, opaque);
143
+ return (*cc->sysemu_ops->write_elf64_note)(f, cpu, cpuid, opaque);
144
}
145
146
bool cpu_virtio_is_big_endian(CPUState *cpu)
147
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -XXX,XX +XXX,XX @@ static gchar *arm_gdb_arch_name(CPUState *cs)
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps arm_sysemu_ops = {
+    .write_elf32_note = arm_cpu_write_elf32_note,
+    .write_elf64_note = arm_cpu_write_elf64_note,
     .virtio_is_big_endian = arm_cpu_virtio_is_big_endian,
     .legacy_vmsd = &vmstate_arm_cpu,
 };
@@ -XXX,XX +XXX,XX @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
 #ifndef CONFIG_USER_ONLY
     cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
     cc->asidx_from_attrs = arm_asidx_from_attrs;
-    cc->write_elf64_note = arm_cpu_write_elf64_note;
-    cc->write_elf32_note = arm_cpu_write_elf32_note;
     cc->sysemu_ops = &arm_sysemu_ops;
 #endif
     cc->gdb_num_core_regs = 26;
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -XXX,XX +XXX,XX @@ static Property x86_cpu_properties[] = {
 
 static const struct SysemuCPUOps i386_sysemu_ops = {
     .get_crash_info = x86_cpu_get_crash_info,
+    .write_elf32_note = x86_cpu_write_elf32_note,
+    .write_elf64_note = x86_cpu_write_elf64_note,
+    .write_elf32_qemunote = x86_cpu_write_elf32_qemunote,
+    .write_elf64_qemunote = x86_cpu_write_elf64_qemunote,
     .legacy_vmsd = &vmstate_x86_cpu,
 };
 #endif
@@ -XXX,XX +XXX,XX @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
     cc->asidx_from_attrs = x86_asidx_from_attrs;
     cc->get_memory_mapping = x86_cpu_get_memory_mapping;
     cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
-    cc->write_elf64_note = x86_cpu_write_elf64_note;
-    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
-    cc->write_elf32_note = x86_cpu_write_elf32_note;
-    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
     cc->sysemu_ops = &i386_sysemu_ops;
 #endif /* !CONFIG_USER_ONLY */
 
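The per-target hunks are mechanical: collect the CONFIG_USER_ONLY-guarded function pointers into one const table and register the table once in class_init. For a hypothetical target "foo" (all foo_* names are illustrative, not from the series), the pattern reduces to:

#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps foo_sysemu_ops = {
    .write_elf32_note = foo_cpu_write_elf32_note,  /* hypothetical hook */
    .write_elf64_note = foo_cpu_write_elf64_note,  /* hypothetical hook */
    .legacy_vmsd = &vmstate_foo_cpu,               /* hypothetical vmsd */
};
#endif

static void foo_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

#ifndef CONFIG_USER_ONLY
    /* One shared const table replaces the individual cc->write_elf* hooks. */
    cc->sysemu_ops = &foo_sysemu_ops;
#endif
}

A single const table per target can live in .rodata, and user-mode builds never see the system-emulation hooks at all, which is the point of introducing SysemuCPUOps.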
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index XXXXXXX..XXXXXXX 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -XXX,XX +XXX,XX @@ static Property ppc_cpu_properties[] = {
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps ppc_sysemu_ops = {
+    .write_elf32_note = ppc32_cpu_write_elf32_note,
+    .write_elf64_note = ppc64_cpu_write_elf64_note,
     .virtio_is_big_endian = ppc_cpu_is_big_endian,
     .legacy_vmsd = &vmstate_ppc_cpu,
 };
@@ -XXX,XX +XXX,XX @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
     cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug;
     cc->sysemu_ops = &ppc_sysemu_ops;
 #endif
-#if defined(CONFIG_SOFTMMU)
-    cc->write_elf64_note = ppc64_cpu_write_elf64_note;
-    cc->write_elf32_note = ppc32_cpu_write_elf32_note;
-#endif
 
     cc->gdb_num_core_regs = 71;
 #ifndef CONFIG_USER_ONLY
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
 #include "hw/core/sysemu-cpu-ops.h"
 
 static const struct SysemuCPUOps riscv_sysemu_ops = {
+    .write_elf64_note = riscv_cpu_write_elf64_note,
+    .write_elf32_note = riscv_cpu_write_elf32_note,
     .legacy_vmsd = &vmstate_riscv_cpu,
 };
 #endif
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
 #ifndef CONFIG_USER_ONLY
     cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
     cc->sysemu_ops = &riscv_sysemu_ops;
-    cc->write_elf64_note = riscv_cpu_write_elf64_note;
-    cc->write_elf32_note = riscv_cpu_write_elf32_note;
 #endif
     cc->gdb_arch_name = riscv_gdb_arch_name;
     cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/s390x/cpu.c
+++ b/target/s390x/cpu.c
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_reset_full(DeviceState *dev)
 
 static const struct SysemuCPUOps s390_sysemu_ops = {
     .get_crash_info = s390_cpu_get_crash_info,
+    .write_elf64_note = s390_cpu_write_elf64_note,
     .legacy_vmsd = &vmstate_s390_cpu,
 };
 #endif
@@ -XXX,XX +XXX,XX @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
     cc->gdb_write_register = s390_cpu_gdb_write_register;
 #ifndef CONFIG_USER_ONLY
     cc->get_phys_page_debug = s390_cpu_get_phys_page_debug;
-    cc->write_elf64_note = s390_cpu_write_elf64_note;
     cc->sysemu_ops = &s390_sysemu_ops;
 #endif
     cc->disas_set_info = s390_cpu_disas_set_info;
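Note the asymmetries the conversion preserves: s390x installs only a 64-bit note writer, and ppc routes 32-bit and 64-bit notes to different functions. Seen from calling code, the table makes missing per-target support a clean runtime failure rather than a NULL call; a caller can probe support the same way the generic wrappers do. A small illustrative helper (hypothetical, for exposition only):

/* Hypothetical probe: does this CPU provide 32-bit ELF core notes? */
static bool cpu_supports_elf32_notes(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    return cc->sysemu_ops->write_elf32_note != NULL;
}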
--
2.25.1
         return -1;
     }
 
-    if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
+    if (mprotect((void *)buf_rx, size, host_prot_read_exec()) != 0) {
         error_setg_errno(errp, errno, "mprotect for jit splitwx");
         munmap((void *)buf_rx, size);
         munmap((void *)buf_rw, size);
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
     need_prot = PROT_READ | PROT_WRITE;
 #ifndef CONFIG_TCG_INTERPRETER
     if (tcg_splitwx_diff == 0) {
-        need_prot |= PROT_EXEC;
+        need_prot |= host_prot_read_exec();
     }
 #endif
     for (size_t i = 0, n = region.n; i < n; i++) {
@@ -XXX,XX +XXX,XX @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
         } else if (need_prot == (PROT_READ | PROT_WRITE)) {
             rc = qemu_mprotect_rw(start, end - start);
         } else {
+#ifdef CONFIG_POSIX
+            rc = mprotect(start, end - start, need_prot);
+#else
             g_assert_not_reached();
+#endif
         }
         if (rc) {
             error_setg_errno(&error_fatal, errno,
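These hunks funnel every executable mapping of the JIT buffer through one helper instead of open-coding PROT_READ | PROT_EXEC. A minimal sketch of what such a helper looks like, assuming the Linux/aarch64 Branch Target Identification case this pull request targets (the guard names are illustrative; the actual helper in the series may differ in detail):

/*
 * Protection bits for executable host memory.  On aarch64 hosts with
 * BTI enabled, an indirect branch into the JIT buffer faults unless
 * the page is also mapped with PROT_BTI; everywhere else plain
 * read+exec suffices.
 */
static int host_prot_read_exec(void)
{
#if defined(CONFIG_LINUX) && defined(HOST_AARCH64) && defined(PROT_BTI)
    return PROT_READ | PROT_EXEC | PROT_BTI;
#else
    return PROT_READ | PROT_EXEC;
#endif
}

Centralizing the flags matters because the code_gen_buffer is mapped and re-protected in several places (the split-wx mmap path and both tcg_region_init branches above); if any one of them dropped the extra bit, a BTI-enabled host would fault on the first indirect jump into that region.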
--
2.34.1
diff view generated by jsdifflib